From c390d9b979365cb65c00ba61bd4d3e759fea8691 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Mon, 29 Apr 2024 17:15:22 -0400 Subject: [PATCH 01/29] add test for hour_angle_filter --- rdtools/__init__.py | 1 + rdtools/test/filtering_test.py | 23 ++++++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/rdtools/__init__.py b/rdtools/__init__.py index 26c82995..ff7a4eb7 100644 --- a/rdtools/__init__.py +++ b/rdtools/__init__.py @@ -19,6 +19,7 @@ from rdtools.filtering import logic_clip_filter from rdtools.filtering import xgboost_clip_filter from rdtools.filtering import normalized_filter +from rdtools.filtering import hour_angle_filter # from rdtools.soiling import soiling_srr # from rdtools.soiling import soiling_cods # from rdtools.soiling import monthly_soiling_rates diff --git a/rdtools/test/filtering_test.py b/rdtools/test/filtering_test.py index c5ca1fc9..84085c32 100644 --- a/rdtools/test/filtering_test.py +++ b/rdtools/test/filtering_test.py @@ -11,7 +11,8 @@ quantile_clip_filter, normalized_filter, logic_clip_filter, - xgboost_clip_filter) + xgboost_clip_filter, + hour_angle_filter) import warnings from conftest import assert_warnings @@ -359,3 +360,23 @@ def test_normalized_filter_default(): pd.testing.assert_series_equal(normalized_filter( pd.Series([0.01 - eps, 0.01 + eps, 1e308])), pd.Series([False, True, True])) + + +def test_hour_angle_filter(): + # Create a pandas Series with 5 entries and 15 min index + index = pd.date_range(start='29/04/2022 15:00', periods=5, freq='H') + series = pd.Series([1, 2, 3, 4, 5], index=index) + + # Define latitude and longitude + lat, lon = 39.7413, -105.1684 # NREL, Golden, CO + + # Call the function with the test data + result = hour_angle_filter(series, lat, lon) + + # Check that the result is a pandas Series of the same length as the input + assert isinstance(result, pd.Series) + assert len(result) == len(series) + + # Check that the result is the correct boolean Series + expected_result = np.array([False, False, True, True, True]) + assert (result == expected_result).all() From e3661762c47f2c8b271e54999a2894a51a78988d Mon Sep 17 00:00:00 2001 From: martin-springer Date: Mon, 29 Apr 2024 17:28:24 -0400 Subject: [PATCH 02/29] add test_directional_tukey_filter --- rdtools/__init__.py | 1 + rdtools/test/filtering_test.py | 22 ++++++++++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/rdtools/__init__.py b/rdtools/__init__.py index ff7a4eb7..c0bc9e56 100644 --- a/rdtools/__init__.py +++ b/rdtools/__init__.py @@ -20,6 +20,7 @@ from rdtools.filtering import xgboost_clip_filter from rdtools.filtering import normalized_filter from rdtools.filtering import hour_angle_filter +from rdtools.filtering import directional_tukey_filter # from rdtools.soiling import soiling_srr # from rdtools.soiling import soiling_cods # from rdtools.soiling import monthly_soiling_rates diff --git a/rdtools/test/filtering_test.py b/rdtools/test/filtering_test.py index 84085c32..dc4a1df4 100644 --- a/rdtools/test/filtering_test.py +++ b/rdtools/test/filtering_test.py @@ -12,6 +12,7 @@ normalized_filter, logic_clip_filter, xgboost_clip_filter, + directional_tukey_filter, hour_angle_filter) import warnings from conftest import assert_warnings @@ -362,6 +363,23 @@ def test_normalized_filter_default(): pd.Series([False, True, True])) +def test_directional_tukey_filter(): + # Create a pandas Series with 10 entries and daily index + index = pd.date_range(start='1/1/2022', periods=7, freq='D') + series = pd.Series([1, 2, 
3, 25, 4, 5, 6], index=index) + + # Call the function with the test data + result = directional_tukey_filter(series) + + # Check that the result is a pandas Series of the same length as the input + assert isinstance(result, pd.Series) + assert len(result) == len(series) + + # Check that the result is as expected + expected_result = pd.Series([True, True, True, False, True, True, True], index=index) + pd.testing.assert_series_equal(result, expected_result) + + def test_hour_angle_filter(): # Create a pandas Series with 5 entries and 15 min index index = pd.date_range(start='29/04/2022 15:00', periods=5, freq='H') @@ -378,5 +396,5 @@ def test_hour_angle_filter(): assert len(result) == len(series) # Check that the result is the correct boolean Series - expected_result = np.array([False, False, True, True, True]) - assert (result == expected_result).all() + expected_result = pd.Series([False, False, True, True, True], index=index) + pd.testing.assert_series_equal(result, expected_result) From f99a2149053093444b83ab79631f61b171329f44 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Mon, 29 Apr 2024 17:40:38 -0400 Subject: [PATCH 03/29] add test_insolation_filter --- rdtools/__init__.py | 2 ++ rdtools/test/filtering_test.py | 44 +++++++++++++++++++++++++++++++++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/rdtools/__init__.py b/rdtools/__init__.py index c0bc9e56..b9f13cd5 100644 --- a/rdtools/__init__.py +++ b/rdtools/__init__.py @@ -19,6 +19,8 @@ from rdtools.filtering import logic_clip_filter from rdtools.filtering import xgboost_clip_filter from rdtools.filtering import normalized_filter +from rdtools.filtering import insolation_filter +from rdtools.filtering import hampel_filter from rdtools.filtering import hour_angle_filter from rdtools.filtering import directional_tukey_filter # from rdtools.soiling import soiling_srr diff --git a/rdtools/test/filtering_test.py b/rdtools/test/filtering_test.py index dc4a1df4..cf811d5a 100644 --- a/rdtools/test/filtering_test.py +++ b/rdtools/test/filtering_test.py @@ -12,6 +12,8 @@ normalized_filter, logic_clip_filter, xgboost_clip_filter, + insolation_filter, + hampel_filter, directional_tukey_filter, hour_angle_filter) import warnings @@ -363,6 +365,46 @@ def test_normalized_filter_default(): pd.Series([False, True, True])) +def test_insolation_filter(): + # Create a pandas Series with 10 entries + series = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + + # Call the function with the test data + result = insolation_filter(series) + + # Check that the result is a pandas Series of the same length as the input + assert isinstance(result, pd.Series) + assert len(result) == len(series) + + # Check that the result only contains boolean values + assert set(result.unique()).issubset({True, False}) + + # Check that the result is as expected + # Here we're checking that the bottom 10% of values are marked as False + expected_result = pd.Series([False] + [True]*9) + pd.testing.assert_series_equal(result, expected_result) + + +def test_hampel_filter(): + # Create a pandas Series with 10 entries and daily index + index = pd.date_range(start='1/1/2022', periods=10, freq='D') + series = pd.Series([1, 2, 3, 4, 100, 6, 7, 8, 9, 10], index=index) + + # Call the function with the test data + result = hampel_filter(series) + + # Check that the result is a pandas Series of the same length as the input + assert isinstance(result, pd.Series) + assert len(result) == len(series) + + # Check that the result only contains boolean values + assert 
set(result.unique()).issubset({True, False}) + + # Check that the result is as expected + expected_result = pd.Series([True]*3 + [True] + [False] + [True]*5, index=index) + pd.testing.assert_series_equal(result, expected_result) + + def test_directional_tukey_filter(): # Create a pandas Series with 10 entries and daily index index = pd.date_range(start='1/1/2022', periods=7, freq='D') @@ -386,7 +428,7 @@ def test_hour_angle_filter(): series = pd.Series([1, 2, 3, 4, 5], index=index) # Define latitude and longitude - lat, lon = 39.7413, -105.1684 # NREL, Golden, CO + lat, lon = 39.7413, -105.1684 # NREL, Golden, CO # Call the function with the test data result = hour_angle_filter(series, lat, lon) From 05b0af324c2e1fc3521b6ee83035c0f482321944 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Mon, 29 Apr 2024 17:46:20 -0400 Subject: [PATCH 04/29] add test_two_way_window_filter --- rdtools/__init__.py | 1 + rdtools/test/filtering_test.py | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/rdtools/__init__.py b/rdtools/__init__.py index b9f13cd5..342427c2 100644 --- a/rdtools/__init__.py +++ b/rdtools/__init__.py @@ -19,6 +19,7 @@ from rdtools.filtering import logic_clip_filter from rdtools.filtering import xgboost_clip_filter from rdtools.filtering import normalized_filter +from rdtools.filtering import two_way_window_filter from rdtools.filtering import insolation_filter from rdtools.filtering import hampel_filter from rdtools.filtering import hour_angle_filter diff --git a/rdtools/test/filtering_test.py b/rdtools/test/filtering_test.py index cf811d5a..1dff0cf9 100644 --- a/rdtools/test/filtering_test.py +++ b/rdtools/test/filtering_test.py @@ -12,6 +12,7 @@ normalized_filter, logic_clip_filter, xgboost_clip_filter, + two_way_window_filter, insolation_filter, hampel_filter, directional_tukey_filter, @@ -365,6 +366,27 @@ def test_normalized_filter_default(): pd.Series([False, True, True])) +def test_two_way_window_filter(): + # Create a pandas Series with 10 entries and daily index + index = pd.date_range(start='1/1/2022', periods=10, freq='D') + series = pd.Series([1, 2, 3, 4, 20, 6, 7, 8, 9, 10], index=index) + + # Call the function with the test data + result = two_way_window_filter(series) + + # Check that the result is a pandas Series of the same length as the input + assert isinstance(result, pd.Series) + assert len(result) == len(series) + + # Check that the result only contains boolean values + assert set(result.unique()).issubset({True, False}) + + # Check that the result is as expected + # Here we're checking that the outlier is marked as False + expected_result = pd.Series([True]*4 + [False]*2 + [True]*4, index=index) + pd.testing.assert_series_equal(result, expected_result) + + def test_insolation_filter(): # Create a pandas Series with 10 entries series = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) From dd06d29ab41a7a1392bc25aada60166587887fd5 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Mon, 29 Apr 2024 18:18:30 -0400 Subject: [PATCH 05/29] add bootstrap additive test --- rdtools/test/bootstrap_test.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/rdtools/test/bootstrap_test.py b/rdtools/test/bootstrap_test.py index ee3c3218..1abfd1d9 100644 --- a/rdtools/test/bootstrap_test.py +++ b/rdtools/test/bootstrap_test.py @@ -1,18 +1,21 @@ '''Bootstrap module tests.''' +import pytest + from rdtools.bootstrap import _construct_confidence_intervals, \ _make_time_series_bootstrap_samples from rdtools.degradation import 
degradation_year_on_year -def test_bootstrap_module(cods_normalized_daily, cods_normalized_daily_wo_noise): +@pytest.mark.parametrize("decomposition_type", ["multiplicative", "additive"]) +def test_bootstrap_module(cods_normalized_daily, cods_normalized_daily_wo_noise, decomposition_type): ''' Test make time serie bootstrap samples and construct of confidence intervals. ''' # Test make bootstrap samples bootstrap_samples = _make_time_series_bootstrap_samples(cods_normalized_daily, cods_normalized_daily_wo_noise, sample_nr=10, block_length=90, - decomposition_type='multiplicative') + decomposition_type=decomposition_type) # Check if results are as expected assert (bootstrap_samples.index == cods_normalized_daily.index).all(), \ "Index of bootstrapped signals is not as expected" @@ -30,3 +33,4 @@ def test_bootstrap_module(cods_normalized_daily, cods_normalized_daily_wo_noise) assert len(metrics) == 10, "Length of metrics is not as expected" for m in metrics: assert isinstance(m, float), "Not all metrics are float" + From 817220827c0623f47e9f738e8995af17381e207e Mon Sep 17 00:00:00 2001 From: martin-springer Date: Mon, 29 Apr 2024 18:19:10 -0400 Subject: [PATCH 06/29] bootstrap_test fix linting --- rdtools/test/bootstrap_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/rdtools/test/bootstrap_test.py b/rdtools/test/bootstrap_test.py index 1abfd1d9..0b05c856 100644 --- a/rdtools/test/bootstrap_test.py +++ b/rdtools/test/bootstrap_test.py @@ -8,7 +8,9 @@ @pytest.mark.parametrize("decomposition_type", ["multiplicative", "additive"]) -def test_bootstrap_module(cods_normalized_daily, cods_normalized_daily_wo_noise, decomposition_type): +def test_bootstrap_module( + cods_normalized_daily, cods_normalized_daily_wo_noise, decomposition_type +): ''' Test make time serie bootstrap samples and construct of confidence intervals. ''' # Test make bootstrap samples bootstrap_samples = _make_time_series_bootstrap_samples(cods_normalized_daily, From c45d438d6311949f21aca3892499f9660759b820 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 30 Apr 2024 09:10:30 -0400 Subject: [PATCH 07/29] run blake --- rdtools/test/bootstrap_test.py | 40 ++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/rdtools/test/bootstrap_test.py b/rdtools/test/bootstrap_test.py index 0b05c856..e066a024 100644 --- a/rdtools/test/bootstrap_test.py +++ b/rdtools/test/bootstrap_test.py @@ -1,9 +1,11 @@ -'''Bootstrap module tests.''' +"""Bootstrap module tests.""" import pytest -from rdtools.bootstrap import _construct_confidence_intervals, \ - _make_time_series_bootstrap_samples +from rdtools.bootstrap import ( + _construct_confidence_intervals, + _make_time_series_bootstrap_samples, +) from rdtools.degradation import degradation_year_on_year @@ -11,28 +13,34 @@ def test_bootstrap_module( cods_normalized_daily, cods_normalized_daily_wo_noise, decomposition_type ): - ''' Test make time serie bootstrap samples and construct of confidence intervals. 
''' + """Test make time serie bootstrap samples and construct of confidence intervals.""" # Test make bootstrap samples - bootstrap_samples = _make_time_series_bootstrap_samples(cods_normalized_daily, - cods_normalized_daily_wo_noise, - sample_nr=10, - block_length=90, - decomposition_type=decomposition_type) + bootstrap_samples = _make_time_series_bootstrap_samples( + cods_normalized_daily, + cods_normalized_daily_wo_noise, + sample_nr=10, + block_length=90, + decomposition_type=decomposition_type, + ) # Check if results are as expected - assert (bootstrap_samples.index == cods_normalized_daily.index).all(), \ - "Index of bootstrapped signals is not as expected" - assert bootstrap_samples.shape[1] == 10, "Number of columns in bootstrapped signals is wrong" + assert ( + bootstrap_samples.index == cods_normalized_daily.index + ).all(), "Index of bootstrapped signals is not as expected" + assert ( + bootstrap_samples.shape[1] == 10 + ), "Number of columns in bootstrapped signals is wrong" # Test construction of confidence intervals confidence_intervals, exceedance_level, metrics = _construct_confidence_intervals( - bootstrap_samples, degradation_year_on_year, uncertainty_method='none') + bootstrap_samples, degradation_year_on_year, uncertainty_method="none" + ) # Check if results are as expected assert len(confidence_intervals) == 2, "2 confidence interval bounds not returned" - assert isinstance(confidence_intervals[0], float) and \ - isinstance(confidence_intervals[1], float), "Confidence interval bounds are not float" + assert isinstance(confidence_intervals[0], float) and isinstance( + confidence_intervals[1], float + ), "Confidence interval bounds are not float" assert isinstance(exceedance_level, float), "Exceedance level is not float" assert len(metrics) == 10, "Length of metrics is not as expected" for m in metrics: assert isinstance(m, float), "Not all metrics are float" - From 4c84bdcc63834b7f21bdec509fedce1905a399a6 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 30 Apr 2024 09:18:10 -0400 Subject: [PATCH 08/29] bootstrap test for value error --- rdtools/test/bootstrap_test.py | 74 ++++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 30 deletions(-) diff --git a/rdtools/test/bootstrap_test.py b/rdtools/test/bootstrap_test.py index e066a024..52028e88 100644 --- a/rdtools/test/bootstrap_test.py +++ b/rdtools/test/bootstrap_test.py @@ -9,38 +9,52 @@ from rdtools.degradation import degradation_year_on_year -@pytest.mark.parametrize("decomposition_type", ["multiplicative", "additive"]) +@pytest.mark.parametrize("decomposition_type", ["multiplicative", "additive", "error"]) def test_bootstrap_module( cods_normalized_daily, cods_normalized_daily_wo_noise, decomposition_type ): - """Test make time serie bootstrap samples and construct of confidence intervals.""" - # Test make bootstrap samples - bootstrap_samples = _make_time_series_bootstrap_samples( - cods_normalized_daily, - cods_normalized_daily_wo_noise, - sample_nr=10, - block_length=90, - decomposition_type=decomposition_type, - ) - # Check if results are as expected - assert ( - bootstrap_samples.index == cods_normalized_daily.index - ).all(), "Index of bootstrapped signals is not as expected" - assert ( - bootstrap_samples.shape[1] == 10 - ), "Number of columns in bootstrapped signals is wrong" - # Test construction of confidence intervals - confidence_intervals, exceedance_level, metrics = _construct_confidence_intervals( - bootstrap_samples, degradation_year_on_year, uncertainty_method="none" - ) + if 
decomposition_type == "error": + pytest.raises( + ValueError, + _make_time_series_bootstrap_samples, + cods_normalized_daily, + cods_normalized_daily_wo_noise, + decomposition_type=decomposition_type, + ) + else: + # Rest make time serie bootstrap samples and construct of confidence intervals. + # Test make bootstrap samples + bootstrap_samples = _make_time_series_bootstrap_samples( + cods_normalized_daily, + cods_normalized_daily_wo_noise, + sample_nr=10, + block_length=90, + decomposition_type=decomposition_type, + ) + # Check if results are as expected + assert ( + bootstrap_samples.index == cods_normalized_daily.index + ).all(), "Index of bootstrapped signals is not as expected" + assert ( + bootstrap_samples.shape[1] == 10 + ), "Number of columns in bootstrapped signals is wrong" - # Check if results are as expected - assert len(confidence_intervals) == 2, "2 confidence interval bounds not returned" - assert isinstance(confidence_intervals[0], float) and isinstance( - confidence_intervals[1], float - ), "Confidence interval bounds are not float" - assert isinstance(exceedance_level, float), "Exceedance level is not float" - assert len(metrics) == 10, "Length of metrics is not as expected" - for m in metrics: - assert isinstance(m, float), "Not all metrics are float" + # Test construction of confidence intervals + confidence_intervals, exceedance_level, metrics = ( + _construct_confidence_intervals( + bootstrap_samples, degradation_year_on_year, uncertainty_method="none" + ) + ) + + # Check if results are as expected + assert ( + len(confidence_intervals) == 2 + ), "2 confidence interval bounds not returned" + assert isinstance(confidence_intervals[0], float) and isinstance( + confidence_intervals[1], float + ), "Confidence interval bounds are not float" + assert isinstance(exceedance_level, float), "Exceedance level is not float" + assert len(metrics) == 10, "Length of metrics is not as expected" + for m in metrics: + assert isinstance(m, float), "Not all metrics are float" From f1378d99006f3af25675bb250487a8d082641b34 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 30 Apr 2024 09:31:49 -0400 Subject: [PATCH 09/29] irradiance rescale test for value error --- rdtools/test/irradiance_rescale_test.py | 63 +++++++++++++++---------- 1 file changed, 38 insertions(+), 25 deletions(-) diff --git a/rdtools/test/irradiance_rescale_test.py b/rdtools/test/irradiance_rescale_test.py index b065dde8..9cec8418 100644 --- a/rdtools/test/irradiance_rescale_test.py +++ b/rdtools/test/irradiance_rescale_test.py @@ -7,19 +7,28 @@ @pytest.fixture def simple_irradiance(): - times = pd.date_range('2019-06-01 12:00', freq='15T', periods=5) + times = pd.date_range("2019-06-01 12:00", freq="15T", periods=5) time_series = pd.Series([1, 2, 3, 4, 5], index=times, dtype=float) return time_series -@pytest.mark.parametrize("method", ['iterative', 'single_opt']) +@pytest.mark.parametrize("method", ["iterative", "single_opt", "error"]) def test_rescale(method, simple_irradiance): # test basic functionality - modeled = simple_irradiance - measured = 1.05 * simple_irradiance - rescaled = irradiance_rescale(measured, modeled, method=method) - expected = measured - assert_series_equal(rescaled, expected, check_exact=False) + if method == "error": + pytest.raises( + ValueError, + irradiance_rescale, + simple_irradiance, + simple_irradiance * 1.05, + method=method, + ) + else: + modeled = simple_irradiance + measured = 1.05 * simple_irradiance + rescaled = irradiance_rescale(measured, modeled, method=method) + 
expected = measured + assert_series_equal(rescaled, expected, check_exact=False) def test_max_iterations(simple_irradiance): @@ -31,11 +40,9 @@ def test_max_iterations(simple_irradiance): modeled.iloc[4] *= 0.8 with pytest.raises(ConvergenceError): - _ = irradiance_rescale(measured, modeled, method='iterative', - max_iterations=2) + _ = irradiance_rescale(measured, modeled, method="iterative", max_iterations=2) - _ = irradiance_rescale(measured, modeled, method='iterative', - max_iterations=10) + _ = irradiance_rescale(measured, modeled, method="iterative", max_iterations=10) def test_max_iterations_zero(simple_irradiance): @@ -43,26 +50,32 @@ def test_max_iterations_zero(simple_irradiance): # test series already close enough true_factor = 1.0 + 1e-8 - rescaled = irradiance_rescale(simple_irradiance, - simple_irradiance * true_factor, - max_iterations=0, - method='iterative') + rescaled = irradiance_rescale( + simple_irradiance, + simple_irradiance * true_factor, + max_iterations=0, + method="iterative", + ) assert_series_equal(rescaled, simple_irradiance, check_exact=False) # tighten threshold so that it isn't already close enough with pytest.raises(ConvergenceError): - _ = irradiance_rescale(simple_irradiance, - simple_irradiance * true_factor, - max_iterations=0, - convergence_threshold=1e-9, - method='iterative') + _ = irradiance_rescale( + simple_irradiance, + simple_irradiance * true_factor, + max_iterations=0, + convergence_threshold=1e-9, + method="iterative", + ) def test_convergence_threshold(simple_irradiance): # can't converge if threshold is negative with pytest.raises(ConvergenceError): - _ = irradiance_rescale(simple_irradiance, - simple_irradiance * 1.05, - max_iterations=5, # reduced count for speed - convergence_threshold=-1, - method='iterative') + _ = irradiance_rescale( + simple_irradiance, + simple_irradiance * 1.05, + max_iterations=5, # reduced count for speed + convergence_threshold=-1, + method="iterative", + ) From b072b570ff1f15ca614fd38a8e9ac39bcf333ffd Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 30 Apr 2024 09:40:34 -0400 Subject: [PATCH 10/29] test for ValueError --- rdtools/test/energy_from_power_test.py | 87 ++++++++++++--------- rdtools/test/interpolate_test.py | 102 ++++++++++++++++--------- 2 files changed, 115 insertions(+), 74 deletions(-) diff --git a/rdtools/test/energy_from_power_test.py b/rdtools/test/energy_from_power_test.py index cf4230e4..5ef7a5d6 100644 --- a/rdtools/test/energy_from_power_test.py +++ b/rdtools/test/energy_from_power_test.py @@ -6,7 +6,7 @@ @pytest.fixture def times(): - return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='15T') + return pd.date_range(start="20200101 12:00", end="20200101 13:00", freq="15T") @pytest.fixture @@ -15,72 +15,75 @@ def power(times): def test_energy_from_power_single_arg(power): - expected = power.iloc[1:]*0.25 - expected.name = 'energy_Wh' + expected = power.iloc[1:] * 0.25 + expected.name = "energy_Wh" result = energy_from_power(power) pd.testing.assert_series_equal(result, expected) def test_energy_from_power_instantaneous(power): - expected = (0.25*(power + power.shift())/2).dropna() - expected.name = 'energy_Wh' - result = energy_from_power(power, power_type='instantaneous') + expected = (0.25 * (power + power.shift()) / 2).dropna() + expected.name = "energy_Wh" + result = energy_from_power(power, power_type="instantaneous") pd.testing.assert_series_equal(result, expected) def test_energy_from_power_max_timedelta_inference(power): - expected = 
power.iloc[1:]*0.25 - expected.name = 'energy_Wh' + expected = power.iloc[1:] * 0.25 + expected.name = "energy_Wh" expected.iloc[:2] = np.nan - match = 'Fraction of excluded data (.*) exceeded threshold' + match = "Fraction of excluded data (.*) exceeded threshold" with pytest.warns(UserWarning, match=match): result = energy_from_power(power.drop(power.index[1])) pd.testing.assert_series_equal(result, expected) def test_energy_from_power_max_timedelta(power): - expected = power.iloc[1:]*0.25 - expected.name = 'energy_Wh' - result = energy_from_power(power.drop(power.index[1]), - max_timedelta=pd.to_timedelta('30 minutes')) + expected = power.iloc[1:] * 0.25 + expected.name = "energy_Wh" + result = energy_from_power( + power.drop(power.index[1]), max_timedelta=pd.to_timedelta("30 minutes") + ) pd.testing.assert_series_equal(result, expected) def test_energy_from_power_upsample(power): - expected = power.resample('10T').asfreq().interpolate()/6 + expected = power.resample("10T").asfreq().interpolate() / 6 expected = expected.iloc[1:] - expected.name = 'energy_Wh' - result = energy_from_power(power, target_frequency='10T') + expected.name = "energy_Wh" + result = energy_from_power(power, target_frequency="10T") pd.testing.assert_series_equal(result, expected) def test_energy_from_power_downsample(power): - expected = power.resample('20T').asfreq() + expected = power.resample("20T").asfreq() expected = expected.iloc[1:] expected = pd.Series([0.75, 0.833333333, 0.416666667], index=expected.index) - expected.name = 'energy_Wh' - result = energy_from_power(power, target_frequency='20T') + expected.name = "energy_Wh" + result = energy_from_power(power, target_frequency="20T") pd.testing.assert_series_equal(result, expected) def test_energy_from_power_max_timedelta_edge_case(): - times = pd.date_range('2020-01-01 12:00', periods=4, freq='15T') + times = pd.date_range("2020-01-01 12:00", periods=4, freq="15T") power = pd.Series(1, index=times) power = power.drop(power.index[2]) - result = energy_from_power(power, '30T', max_timedelta=pd.to_timedelta('20 minutes')) + result = energy_from_power( + power, "30T", max_timedelta=pd.to_timedelta("20 minutes") + ) assert result.isnull().all() def test_energy_from_power_single_value_input(): - times = pd.date_range('2019-01-01', freq='15T', periods=1) - power = pd.Series([100.], index=times) - expected_result = pd.Series([25.], index=times, name='energy_Wh') + times = pd.date_range("2019-01-01", freq="15T", periods=1) + power = pd.Series([100.0], index=times) + expected_result = pd.Series([25.0], index=times, name="energy_Wh") result = energy_from_power(power) pd.testing.assert_series_equal(result, expected_result) def test_energy_from_power_single_value_input_no_freq(): - power = pd.Series([1], pd.date_range('2019-01-01', periods=1, freq='15T')) + power = pd.Series([1], pd.date_range("2019-01-01", periods=1, freq="15T")) power.index.freq = None match = "Could not determine period of input power" with pytest.raises(ValueError, match=match): @@ -88,27 +91,39 @@ def test_energy_from_power_single_value_input_no_freq(): def test_energy_from_power_single_value_instantaneous(): - power = pd.Series([1], pd.date_range('2019-01-01', periods=1, freq='15T')) + power = pd.Series([1], pd.date_range("2019-01-01", periods=1, freq="15T")) power.index.freq = None - match = ("power_type='instantaneous' is incompatible with single element power. " - "Use power_type='right-labeled'") + match = ( + "power_type='instantaneous' is incompatible with single element power. 
" + "Use power_type='right-labeled'" + ) with pytest.raises(ValueError, match=match): - energy_from_power(power, power_type='instantaneous') + energy_from_power(power, power_type="instantaneous") def test_energy_from_power_single_value_with_target(): - times = pd.date_range('2019-01-01', freq='15T', periods=1) - power = pd.Series([100.], index=times) - expected_result = pd.Series([100.], index=times, name='energy_Wh') - result = energy_from_power(power, target_frequency='H') + times = pd.date_range("2019-01-01", freq="15T", periods=1) + power = pd.Series([100.0], index=times) + expected_result = pd.Series([100.0], index=times, name="energy_Wh") + result = energy_from_power(power, target_frequency="H") pd.testing.assert_series_equal(result, expected_result) def test_energy_from_power_leading_nans(): # GH 244 - power = pd.Series(1, pd.date_range('2019-01-01', freq='15min', periods=5)) + power = pd.Series(1, pd.date_range("2019-01-01", freq="15min", periods=5)) power.iloc[:2] = np.nan - expected_result = pd.Series([np.nan, np.nan, 0.25, 0.25], - index=power.index[1:], name='energy_Wh') + expected_result = pd.Series( + [np.nan, np.nan, 0.25, 0.25], index=power.index[1:], name="energy_Wh" + ) result = energy_from_power(power) pd.testing.assert_series_equal(result, expected_result) + + +def test_energy_from_power_series_index(): + power = pd.Series([1, 2, 3, 4, 5]) + pytest.raises( + ValueError, + energy_from_power, + power, + ) diff --git a/rdtools/test/interpolate_test.py b/rdtools/test/interpolate_test.py index 40bac08b..9fbcd1d2 100644 --- a/rdtools/test/interpolate_test.py +++ b/rdtools/test/interpolate_test.py @@ -7,20 +7,22 @@ @pytest.fixture def time_series(): - times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:15', freq='15T') - time_series = pd.Series(data=[9, 6, 3, 3, 6, 9], index=times, name='foo') + times = pd.date_range("2018-04-01 12:00", "2018-04-01 13:15", freq="15T") + time_series = pd.Series(data=[9, 6, 3, 3, 6, 9], index=times, name="foo") time_series = time_series.drop(times[4]) return time_series @pytest.fixture def target_index(time_series): - return pd.date_range(time_series.index.min(), time_series.index.max(), freq='20T') + return pd.date_range(time_series.index.min(), time_series.index.max(), freq="20T") @pytest.fixture def expected_series(target_index, time_series): - return pd.Series(data=[9.0, 5.0, 3.0, np.nan], index=target_index, name=time_series.name) + return pd.Series( + data=[9.0, 5.0, 3.0, np.nan], index=target_index, name=time_series.name + ) @pytest.fixture @@ -28,8 +30,8 @@ def test_df(time_series): time_series1 = time_series.copy() time_series2 = time_series.copy() - time_series2.index = time_series2.index + pd.to_timedelta('30 minutes') - time_series2.name = 'bar' + time_series2.index = time_series2.index + pd.to_timedelta("30 minutes") + time_series2.name = "bar" test_df = pd.concat([time_series1, time_series2], axis=1) @@ -38,17 +40,17 @@ def test_df(time_series): @pytest.fixture def df_target_index(target_index): - return target_index + pd.to_timedelta('15 minutes') + return target_index + pd.to_timedelta("15 minutes") @pytest.fixture def df_expected_result(df_target_index, test_df): col0 = test_df.columns[0] col1 = test_df.columns[1] - expected_df_result = pd.DataFrame({ - col0: [6.0, 3.0, np.nan, 9.0], - col1: [np.nan, 8.0, 4.0, 3.0] - }, index=df_target_index) + expected_df_result = pd.DataFrame( + {col0: [6.0, 3.0, np.nan, 9.0], col1: [np.nan, 8.0, 4.0, 3.0]}, + index=df_target_index, + ) expected_df_result = 
expected_df_result[test_df.columns] return expected_df_result @@ -56,20 +58,29 @@ def df_expected_result(df_target_index, test_df): def test_interpolate_freq_specification(time_series, target_index, expected_series): # test the string specification - interpolated = interpolate(time_series, target_index.freq.freqstr, - pd.to_timedelta('15 minutes'), warning_threshold=0.21) + interpolated = interpolate( + time_series, + target_index.freq.freqstr, + pd.to_timedelta("15 minutes"), + warning_threshold=0.21, + ) pd.testing.assert_series_equal(interpolated, expected_series) # test the DateOffset specification - interpolated = interpolate(time_series, target_index.freq, pd.to_timedelta('15 minutes'), - warning_threshold=0.21) + interpolated = interpolate( + time_series, + target_index.freq, + pd.to_timedelta("15 minutes"), + warning_threshold=0.21, + ) pd.testing.assert_series_equal(interpolated, expected_series) def test_interpolate_calculation(time_series, target_index, expected_series): - interpolated = interpolate(time_series, target_index, pd.to_timedelta('15 minutes'), - warning_threshold=0.21) + interpolated = interpolate( + time_series, target_index, pd.to_timedelta("15 minutes"), warning_threshold=0.21 + ) pd.testing.assert_series_equal(interpolated, expected_series) @@ -82,25 +93,28 @@ def test_interpolate_two_argument(time_series, target_index, expected_series): def test_interpolate_tz_validation(time_series, target_index, expected_series): with pytest.raises(ValueError): - interpolate(time_series, target_index.tz_localize('UTC'), pd.to_timedelta('15 minutes')) + interpolate( + time_series, target_index.tz_localize("UTC"), pd.to_timedelta("15 minutes") + ) time_series = time_series.copy() - time_series.index = time_series.index.tz_localize('UTC') + time_series.index = time_series.index.tz_localize("UTC") with pytest.raises(ValueError): - interpolate(time_series, target_index, pd.to_timedelta('15 minutes')) + interpolate(time_series, target_index, pd.to_timedelta("15 minutes")) def test_interpolate_same_tz(time_series, target_index, expected_series): time_series = time_series.copy() expected_series = expected_series.copy() - time_series.index = time_series.index.tz_localize('America/Denver') - target_index = target_index.tz_localize('America/Denver') - expected_series.index = expected_series.index.tz_localize('America/Denver') + time_series.index = time_series.index.tz_localize("America/Denver") + target_index = target_index.tz_localize("America/Denver") + expected_series.index = expected_series.index.tz_localize("America/Denver") - interpolated = interpolate(time_series, target_index, pd.to_timedelta('15 minutes'), - warning_threshold=0.21) + interpolated = interpolate( + time_series, target_index, pd.to_timedelta("15 minutes"), warning_threshold=0.21 + ) pd.testing.assert_series_equal(interpolated, expected_series) @@ -108,18 +122,22 @@ def test_interpolate_different_tz(time_series, target_index, expected_series): time_series = time_series.copy() expected_series = expected_series.copy() - time_series.index = time_series.index.tz_localize('America/Denver').tz_convert('UTC') - target_index = target_index.tz_localize('America/Denver') - expected_series.index = expected_series.index.tz_localize('America/Denver') + time_series.index = time_series.index.tz_localize("America/Denver").tz_convert( + "UTC" + ) + target_index = target_index.tz_localize("America/Denver") + expected_series.index = expected_series.index.tz_localize("America/Denver") - interpolated = interpolate(time_series, 
target_index, pd.to_timedelta('15 minutes'), - warning_threshold=0.21) + interpolated = interpolate( + time_series, target_index, pd.to_timedelta("15 minutes"), warning_threshold=0.21 + ) pd.testing.assert_series_equal(interpolated, expected_series) def test_interpolate_dataframe(test_df, df_target_index, df_expected_result): - interpolated = interpolate(test_df, df_target_index, pd.to_timedelta('15 minutes'), - warning_threshold=0.21) + interpolated = interpolate( + test_df, df_target_index, pd.to_timedelta("15 minutes"), warning_threshold=0.21 + ) pd.testing.assert_frame_equal(interpolated, df_expected_result) @@ -127,15 +145,23 @@ def test_interpolate_warning(test_df, df_target_index, df_expected_result): N = len(test_df) all_idx = list(range(N)) # drop every other value in the first third of the dataset - index_with_gaps = all_idx[:N//3][::2] + all_idx[N//3:] + index_with_gaps = all_idx[: N // 3][::2] + all_idx[N // 3 :] test_df = test_df.iloc[index_with_gaps, :] with pytest.warns(UserWarning): - interpolate(test_df, df_target_index, pd.to_timedelta('15 minutes'), - warning_threshold=0.1) + interpolate( + test_df, + df_target_index, + pd.to_timedelta("15 minutes"), + warning_threshold=0.1, + ) with warnings.catch_warnings(): warnings.simplefilter("error") - interpolate(test_df, df_target_index, pd.to_timedelta('15 minutes'), - warning_threshold=0.5) - warnings.filterwarnings("error", message='Fraction of excluded data') + interpolate( + test_df, + df_target_index, + pd.to_timedelta("15 minutes"), + warning_threshold=0.5, + ) + warnings.filterwarnings("error", message="Fraction of excluded data") # if this test fails, it means a warning was raised that was not expected From 6ac03a1fe25e1d30128dd85177dfceba1752004c Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 30 Apr 2024 10:05:17 -0400 Subject: [PATCH 11/29] flake8 ignore E203 --- .flake8 | 5 ++++- rdtools/test/interpolate_test.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.flake8 b/.flake8 index 9d831d4e..52611dd6 100644 --- a/.flake8 +++ b/.flake8 @@ -2,6 +2,9 @@ # see https://flake8.pycqa.org/en/latest/user/options.html [flake8] +# E203 is not PEP8 compliant https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#slices +# Is excluded from flake8's own config https://flake8.pycqa.org/en/latest/user/configuration.html +extend-ignore = E203 max-line-length = 99 max-doc-length = 99 per-file-ignores = @@ -9,7 +12,7 @@ per-file-ignores = __init__.py:F401 # invalid escape sequence '\s' versioneer.py:W605 -exclude = +exclude = docs .eggs build diff --git a/rdtools/test/interpolate_test.py b/rdtools/test/interpolate_test.py index 9fbcd1d2..adad0297 100644 --- a/rdtools/test/interpolate_test.py +++ b/rdtools/test/interpolate_test.py @@ -145,7 +145,7 @@ def test_interpolate_warning(test_df, df_target_index, df_expected_result): N = len(test_df) all_idx = list(range(N)) # drop every other value in the first third of the dataset - index_with_gaps = all_idx[: N // 3][::2] + all_idx[N // 3 :] + index_with_gaps = all_idx[: N // 3][::2] + all_idx[N // 3:] test_df = test_df.iloc[index_with_gaps, :] with pytest.warns(UserWarning): interpolate( From 73d60c0b1ffcd053a4b407bde457e335c62b0430 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Wed, 1 May 2024 10:11:02 -0400 Subject: [PATCH 12/29] add pvlib clearsky filter --- rdtools/test/filtering_test.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/rdtools/test/filtering_test.py b/rdtools/test/filtering_test.py 
index 1dff0cf9..5ca11864 100644 --- a/rdtools/test/filtering_test.py +++ b/rdtools/test/filtering_test.py @@ -5,6 +5,7 @@ import numpy as np from rdtools import (clearsky_filter, csi_filter, + pvlib_clearsky_filter, poa_filter, tcell_filter, clip_filter, @@ -61,6 +62,27 @@ def test_csi_filter(): assert filtered.tolist() == expected_result.tolist() +@pytest.mark.parametrize("lookup_parameters", [True, False]) +def test_pvlib_clearsky_filter(lookup_parameters): + ''' Unit tests for pvlib clear sky filter.''' + + index = pd.date_range(start='01/05/2024 15:00', periods=120, freq='min') + poa_global_clearsky = pd.Series(np.linspace(800, 919, 120), index=index) + + # Add cloud event + poa_global_measured = poa_global_clearsky.copy() + poa_global_measured.iloc[60:70] = [500, 400, 300, 200, 100, 0, 100, 200, 300, 400] + + filtered = pvlib_clearsky_filter(poa_global_measured, + poa_global_clearsky, + window_length=10, + lookup_parameters=lookup_parameters) + + # Expect clearsky index is filtered. + expected_result = expected_result = poa_global_measured > 500 + pd.testing.assert_series_equal(filtered, expected_result) + + def test_poa_filter(): ''' Unit tests for plane of array insolation filter.''' From 664a7a174ad8fe894eda433d69d7e2a533512745 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 21 May 2024 17:37:57 -0400 Subject: [PATCH 13/29] add aggregated filter tests to analysis chain tests --- rdtools/test/analysis_chains_test.py | 55 +++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) diff --git a/rdtools/test/analysis_chains_test.py b/rdtools/test/analysis_chains_test.py index 5eb6592a..6520c282 100644 --- a/rdtools/test/analysis_chains_test.py +++ b/rdtools/test/analysis_chains_test.py @@ -1,4 +1,4 @@ -from rdtools import TrendAnalysis, normalization +from rdtools import TrendAnalysis, normalization, filtering from conftest import assert_isinstance, assert_warnings import pytest import pvlib @@ -78,6 +78,15 @@ def sensor_analysis_exp_power(sensor_parameters): return rd_analysis +@pytest.fixture +def sensor_analysis_aggregated_no_filter(sensor_parameters): + rd_analysis = TrendAnalysis(**sensor_parameters, power_dc_rated=1.0) + rd_analysis.filter_params = {} # disable all index-based filters + rd_analysis.filter_params_aggregated = {} + rd_analysis.sensor_analysis(analyses=["yoy_degradation"]) + return rd_analysis + + def test_interpolation(basic_parameters, degradation_trend): power = degradation_trend @@ -247,6 +256,50 @@ def test_aggregated_filter_components_no_filters(sensor_parameters): assert rd_analysis.sensor_filter_components.empty +def test_aggregated_filter_components_two_way_window_filter(sensor_analysis_aggregated_no_filter): + rd_analysis = sensor_analysis_aggregated_no_filter + aggregated_no_filter = rd_analysis.sensor_aggregated_performance + rd_analysis.filter_params_aggregated = {"two_way_window_filter": {}} + rd_analysis.sensor_analysis(analyses=["yoy_degradation"]) + daily_expected = filtering.two_way_window_filter(aggregated_no_filter) + pd.testing.assert_series_equal( + rd_analysis.sensor_filter_aggregated, daily_expected, check_names=False + ) + +def test_aggregated_filter_components_insolation_filter(sensor_analysis_aggregated_no_filter): + rd_analysis = sensor_analysis_aggregated_no_filter + aggregated_no_filter = rd_analysis.sensor_aggregated_performance + rd_analysis.filter_params_aggregated = {"insolation_filter": {}} + rd_analysis.sensor_analysis(analyses=["yoy_degradation"]) + daily_expected = 
filtering.insolation_filter(aggregated_no_filter) + pd.testing.assert_series_equal( + rd_analysis.sensor_filter_aggregated, daily_expected, check_names=False + ) + + +def test_aggregated_filter_components_hampel_filter(sensor_analysis_aggregated_no_filter): + rd_analysis = sensor_analysis_aggregated_no_filter + aggregated_no_filter = rd_analysis.sensor_aggregated_performance + rd_analysis.filter_params_aggregated = {"hampel_filter": {}} + rd_analysis.sensor_analysis(analyses=["yoy_degradation"]) + daily_expected = filtering.hampel_filter(aggregated_no_filter) + pd.testing.assert_series_equal( + rd_analysis.sensor_filter_aggregated, daily_expected, check_names=False + ) + + +def test_aggregated_filter_components_directional_tukey_filter( + sensor_analysis_aggregated_no_filter): + rd_analysis = sensor_analysis_aggregated_no_filter + aggregated_no_filter = rd_analysis.sensor_aggregated_performance + rd_analysis.filter_params_aggregated = {"directional_tukey_filter": {}} + rd_analysis.sensor_analysis(analyses=["yoy_degradation"]) + daily_expected = filtering.directional_tukey_filter(aggregated_no_filter) + pd.testing.assert_series_equal( + rd_analysis.sensor_filter_aggregated, daily_expected, check_names=False + ) + + @pytest.mark.parametrize("workflow", ["sensor", "clearsky"]) def test_filter_ad_hoc_warnings(workflow, sensor_parameters): rd_analysis = TrendAnalysis(**sensor_parameters, power_dc_rated=1.0) From 0a11728c5300ca4ce5fa924458ab6108076cd988 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Wed, 22 May 2024 09:48:21 -0400 Subject: [PATCH 14/29] add missing line --- rdtools/test/analysis_chains_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/rdtools/test/analysis_chains_test.py b/rdtools/test/analysis_chains_test.py index 6520c282..a37c44bf 100644 --- a/rdtools/test/analysis_chains_test.py +++ b/rdtools/test/analysis_chains_test.py @@ -266,6 +266,7 @@ def test_aggregated_filter_components_two_way_window_filter(sensor_analysis_aggr rd_analysis.sensor_filter_aggregated, daily_expected, check_names=False ) + def test_aggregated_filter_components_insolation_filter(sensor_analysis_aggregated_no_filter): rd_analysis = sensor_analysis_aggregated_no_filter aggregated_no_filter = rd_analysis.sensor_aggregated_performance From fb13bac7aac8e2e1391e76ed9f04be2992a2b761 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Wed, 22 May 2024 13:34:36 -0400 Subject: [PATCH 15/29] analysis chain hour angle filter test --- rdtools/test/analysis_chains_test.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/rdtools/test/analysis_chains_test.py b/rdtools/test/analysis_chains_test.py index a37c44bf..fb9dda8b 100644 --- a/rdtools/test/analysis_chains_test.py +++ b/rdtools/test/analysis_chains_test.py @@ -222,6 +222,18 @@ def test_filter_components(sensor_parameters): assert (poa_filter == rd_analysis.sensor_filter_components["poa_filter"]).all() +def test_filter_components_hour_angle(sensor_parameters, cs_input): + lat = cs_input["pvlib_location"].latitude + lon = cs_input["pvlib_location"].longitude + hour_angle_filter = filtering.hour_angle_filter(sensor_parameters["pv"], lat, lon) + rd_analysis = TrendAnalysis(**sensor_parameters, power_dc_rated=1.0) + rd_analysis.pvlib_location = cs_input['pvlib_location'] + rd_analysis.filter_params = {'hour_angle_filter': {}} + rd_analysis.filter_params_aggregated = {} + rd_analysis.sensor_analysis(analyses=["yoy_degradation"]) + assert (hour_angle_filter[1:] == rd_analysis.sensor_filter_components["hour_angle_filter"]).all() + + def 
test_aggregated_filter_components(sensor_parameters): daily_ad_hoc_filter = pd.Series(True, index=sensor_parameters["pv"].index) daily_ad_hoc_filter[:600] = False From 99d49131294127ae54fd92a77778b890f37f6544 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Wed, 22 May 2024 13:36:12 -0400 Subject: [PATCH 16/29] liniting --- rdtools/test/analysis_chains_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rdtools/test/analysis_chains_test.py b/rdtools/test/analysis_chains_test.py index fb9dda8b..0f5b3521 100644 --- a/rdtools/test/analysis_chains_test.py +++ b/rdtools/test/analysis_chains_test.py @@ -231,7 +231,8 @@ def test_filter_components_hour_angle(sensor_parameters, cs_input): rd_analysis.filter_params = {'hour_angle_filter': {}} rd_analysis.filter_params_aggregated = {} rd_analysis.sensor_analysis(analyses=["yoy_degradation"]) - assert (hour_angle_filter[1:] == rd_analysis.sensor_filter_components["hour_angle_filter"]).all() + assert (hour_angle_filter[1:] == + rd_analysis.sensor_filter_components["hour_angle_filter"]).all() def test_aggregated_filter_components(sensor_parameters): From d942380c9b20aa1bba2b7ff6c4e64c60dc2a4ac0 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 25 Jun 2024 17:21:53 -0400 Subject: [PATCH 17/29] sensor_clearsky_filter vs sensor_pvlib_clearsky_filter --- rdtools/analysis_chains.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/rdtools/analysis_chains.py b/rdtools/analysis_chains.py index 7793436d..0b4e1738 100644 --- a/rdtools/analysis_chains.py +++ b/rdtools/analysis_chains.py @@ -532,10 +532,11 @@ def _call_clearsky_filter(filter_string): filter_components["clearsky_filter"] = _call_clearsky_filter( "clearsky_filter" ) - if "sensor_clearsky_filter" in self.filter_params: - filter_components["sensor_clearsky_filter"] = _call_clearsky_filter( - "sensor_clearsky_filter" - ) + # TODO: Ask Mike about this section + # if "sensor_clearsky_filter" in self.filter_params: + # filter_components["sensor_clearsky_filter"] = _call_clearsky_filter( + # "sensor_clearsky_filter" + # ) # note: the previous implementation using the & operator treated NaN # filter values as False, so we do the same here for consistency: @@ -803,15 +804,16 @@ def _sensor_preprocess(self): "poa_global must be available to perform _sensor_preprocess" ) - if "sensor_clearsky_filter" in self.filter_params: - try: - if self.poa_global_clearsky is None: - self._calc_clearsky_poa(model="isotropic") - except AttributeError: - raise AttributeError( - "No poa_global_clearsky. 'set_clearsky' must be run " - + "to allow filter_params['sensor_clearsky_filter']. " - ) + # TODO: Ask Mike about this section + # if "sensor_clearsky_filter" in self.filter_params: + # try: + # if self.poa_global_clearsky is None: + # self._calc_clearsky_poa(model="isotropic") + # except AttributeError: + # raise AttributeError( + # "No poa_global_clearsky. 'set_clearsky' must be run " + # + "to allow filter_params['sensor_clearsky_filter']. " + # ) if self.power_expected is None: # Thermal details required if power_expected is not manually set. 
if self.temperature_cell is None and self.temperature_ambient is None: From 42142c2b198718a82b721179f2ea547c53425500 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 25 Jun 2024 17:40:45 -0400 Subject: [PATCH 18/29] update pandocfilters to 1.5.1 --- docs/notebook_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/notebook_requirements.txt b/docs/notebook_requirements.txt index 6884cf65..d29e19aa 100644 --- a/docs/notebook_requirements.txt +++ b/docs/notebook_requirements.txt @@ -31,7 +31,7 @@ nbformat==5.1.0 nest-asyncio==1.5.5 notebook==6.4.12 numexpr==2.10.0 -pandocfilters==1.4.2 +pandocfilters==1.5.1 parso==0.5.2 pexpect==4.6.0 pickleshare==0.7.5 From c6bf3963e681fdfa68a864f653fb8b7e621d90fb Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 25 Jun 2024 18:12:59 -0400 Subject: [PATCH 19/29] restrict numpy<2.0 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4e0fc9b3..74e389d0 100755 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ INSTALL_REQUIRES = [ 'matplotlib >= 3.0.0', - 'numpy >= 1.17.3', + 'numpy >= 1.17.3, <2.0', # pandas restricted to <2.1 until # https://github.com/pandas-dev/pandas/issues/55794 # is resolved From 18e10f7a0bedbb68d53c7ab076f7a5c39834b9c9 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 25 Jun 2024 18:21:59 -0400 Subject: [PATCH 20/29] CODS testing turn on verbose flag --- rdtools/test/soiling_cods_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/rdtools/test/soiling_cods_test.py b/rdtools/test/soiling_cods_test.py index f4908fe8..62046fd8 100644 --- a/rdtools/test/soiling_cods_test.py +++ b/rdtools/test/soiling_cods_test.py @@ -92,7 +92,9 @@ def test_soiling_cods(cods_normalized_daily): ''' Test the CODS algorithm with fixed test case and 16 repetitions''' reps = 16 np.random.seed(1977) - sr, sr_ci, deg, deg_ci, result_df = soiling.soiling_cods(cods_normalized_daily, reps=reps) + sr, sr_ci, deg, deg_ci, result_df = soiling.soiling_cods(cods_normalized_daily, + reps=reps, + verbose=True) assert 0.962207 == pytest.approx(sr, abs=0.5), \ 'Soiling ratio different from expected value' assert np.array([0.96662419, 0.95692131]) == pytest.approx(sr_ci, abs=0.5), \ From 8870cb617e3805192c4c0d6f7fe3a9f80edd256f Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 25 Jun 2024 18:29:36 -0400 Subject: [PATCH 21/29] remove deprecated sapm_dc_power() --- rdtools/normalization.py | 75 ------------------------- rdtools/test/normalization_sapm_test.py | 8 --- 2 files changed, 83 deletions(-) diff --git a/rdtools/normalization.py b/rdtools/normalization.py index 8629c237..23762e29 100644 --- a/rdtools/normalization.py +++ b/rdtools/normalization.py @@ -176,81 +176,6 @@ def normalize_with_pvwatts(energy, pvwatts_kws): return energy_normalized, insolation -@deprecated(since='2.0.0', removal='3.0.0', - alternative='normalize_with_expected_power') -def sapm_dc_power(pvlib_pvsystem, met_data): - ''' - Use Sandia Array Performance Model (SAPM) and PVWatts to compute the - effective DC power using measured irradiance, ambient temperature, and wind - speed. Effective irradiance and cell temperature are calculated with SAPM, - and DC power with PVWatts. - - .. warning:: - The ``pvlib_pvsystem`` argument must be a ``pvlib.pvsystem.LocalizedPVSystem`` - object, which is no longer available as of pvlib 0.9.0. To use this function - you'll need to use an older version of pvlib. 
- - Parameters - ---------- - pvlib_pvsystem : pvlib.pvsystem.LocalizedPVSystem - Object contains orientation, geographic coordinates, equipment - constants (including DC rated power in watts). The object must also - specify either the ``temperature_model_parameters`` attribute or both - ``racking_model`` and ``module_type`` attributes to infer the temperature model parameters. - met_data : pandas.DataFrame - Measured irradiance components, ambient temperature, and wind speed. - Expected met_data DataFrame column names: - ['DNI', 'GHI', 'DHI', 'Temperature', 'Wind Speed'] - - Note - ---- - All series are assumed to be right-labeled, meaning that the recorded - value at a given timestamp refers to the previous time interval - - Returns - ------- - power_dc : pandas.Series - DC power in watts derived using Sandia Array Performance Model and - PVWatts. - effective_poa : pandas.Series - Effective irradiance calculated with SAPM - ''' - - solar_position = pvlib_pvsystem.get_solarposition(met_data.index) - - total_irradiance = pvlib_pvsystem\ - .get_irradiance(solar_position['zenith'], - solar_position['azimuth'], - met_data['DNI'], - met_data['GHI'], - met_data['DHI']) - - aoi = pvlib_pvsystem.get_aoi(solar_position['zenith'], - solar_position['azimuth']) - - airmass = pvlib_pvsystem\ - .get_airmass(solar_position=solar_position, model='kastenyoung1989') - airmass_absolute = airmass['airmass_absolute'] - - effective_irradiance = pvlib.pvsystem\ - .sapm_effective_irradiance(poa_direct=total_irradiance['poa_direct'], - poa_diffuse=total_irradiance['poa_diffuse'], - airmass_absolute=airmass_absolute, - aoi=aoi, - module=pvlib_pvsystem.module) - - temp_cell = pvlib_pvsystem\ - .sapm_celltemp(total_irradiance['poa_global'], - met_data['Temperature'], - met_data['Wind Speed']) - - power_dc = pvlib_pvsystem\ - .pvwatts_dc(g_poa_effective=effective_irradiance, - temp_cell=temp_cell) - - return power_dc, effective_irradiance - - @deprecated(since='2.0.0', removal='3.0.0', alternative='normalize_with_expected_power') def normalize_with_sapm(energy, sapm_kws): diff --git a/rdtools/test/normalization_sapm_test.py b/rdtools/test/normalization_sapm_test.py index 12f9fb82..e70c2d67 100644 --- a/rdtools/test/normalization_sapm_test.py +++ b/rdtools/test/normalization_sapm_test.py @@ -82,14 +82,6 @@ def setUp(self): def tearDown(self): pass - @fail_on_rdtools_version('3.0.0') - def test_sapm_dc_power(self): - ''' Test SAPM DC power. 
''' - - with pytest.warns(rdtoolsDeprecationWarning): - dc_power, poa = sapm_dc_power(self.pvsystem, self.irrad) - self.assertEqual(self.irrad.index.freq, dc_power.index.freq) - self.assertEqual(len(self.irrad), len(dc_power)) @fail_on_rdtools_version('3.0.0') def test_normalization_with_sapm(self): From ebb966ed9e57bf31a436a04007fc4b23f0b230fc Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 25 Jun 2024 18:31:42 -0400 Subject: [PATCH 22/29] remove deprecated normalize_with_sapm() --- rdtools/normalization.py | 57 ------------ rdtools/test/normalization_sapm_test.py | 117 ------------------------ 2 files changed, 174 deletions(-) delete mode 100644 rdtools/test/normalization_sapm_test.py diff --git a/rdtools/normalization.py b/rdtools/normalization.py index 23762e29..456e4c2e 100644 --- a/rdtools/normalization.py +++ b/rdtools/normalization.py @@ -176,63 +176,6 @@ def normalize_with_pvwatts(energy, pvwatts_kws): return energy_normalized, insolation -@deprecated(since='2.0.0', removal='3.0.0', - alternative='normalize_with_expected_power') -def normalize_with_sapm(energy, sapm_kws): - ''' - Normalize system AC energy output given measured met_data and - meteorological data. This method relies on the Sandia Array Performance - Model (SAPM) to compute the effective DC energy using measured irradiance, - ambient temperature, and wind speed. - - Energy timeseries and met_data timeseries can be different granularities. - - .. warning:: - The ``pvlib_pvsystem`` argument must be a ``pvlib.pvsystem.LocalizedPVSystem`` - object, which is no longer available as of pvlib 0.9.0. To use this function - you'll need to use an older version of pvlib. - - Parameters - ---------- - energy : pandas.Series - Energy time series to be normalized in watt hours. - Must be a right-labeled regular time series. - sapm_kws : dict - Dictionary of parameters required for sapm_dc_power function. See - Other Parameters. - - Other Parameters - --------------- - pvlib_pvsystem : pvlib.pvsystem.LocalizedPVSystem object - Object contains orientation, geographic coordinates, equipment - constants (including DC rated power in watts). The object must also - specify either the ``temperature_model_parameters`` attribute or both - ``racking_model`` and ``module_type`` to infer the model parameters. - met_data : pandas.DataFrame - Measured met_data, ambient temperature, and wind speed. Expected - column names are ['DNI', 'GHI', 'DHI', 'Temperature', 'Wind Speed'] - - Note - ---- - All series are assumed to be right-labeled, meaning that the recorded - value at a given timestamp refers to the previous time interval - - Returns - ------- - energy_normalized : pandas.Series - Energy divided by Sandia Model DC energy. - insolation : pandas.Series - Insolation associated with each normalized point - ''' - - power_dc, irrad = sapm_dc_power(**sapm_kws) - - energy_normalized, insolation = normalize_with_expected_power(energy, power_dc, irrad, - pv_input='energy') - - return energy_normalized, insolation - - def _delta_index(series): ''' Takes a pandas series with a DatetimeIndex as input and diff --git a/rdtools/test/normalization_sapm_test.py b/rdtools/test/normalization_sapm_test.py deleted file mode 100644 index e70c2d67..00000000 --- a/rdtools/test/normalization_sapm_test.py +++ /dev/null @@ -1,117 +0,0 @@ -""" Energy Normalization with SAPM Unit Tests. 
""" - -import unittest -import pytest - -import pandas as pd -import numpy as np -import pvlib - -from rdtools.normalization import normalize_with_sapm -from rdtools.normalization import sapm_dc_power - -from conftest import fail_on_rdtools_version, requires_pvlib_below_090 -from rdtools._deprecation import rdtoolsDeprecationWarning - - -@requires_pvlib_below_090 -class SapmNormalizationTestCase(unittest.TestCase): - ''' Unit tests for energy normalization module. ''' - - def setUp(self): - # define module constants and parameters - module = {} - module['A0'] = 0.0315 - module['A1'] = 0.05975 - module['A2'] = -0.01067 - module['A3'] = 0.0008 - module['A4'] = -2.24e-5 - module['B0'] = 1 - module['B1'] = -0.002438 - module['B2'] = 0.00031 - module['B3'] = -1.246e-5 - module['B4'] = 2.11e-7 - module['B5'] = -1.36e-9 - module['FD'] = 1 - module_parameters = { - 'pdc0': 2.1, - 'gamma_pdc': -0.0045 - } - - # define location - test_location = pvlib.location\ - .Location(latitude=37.88447702, longitude=-122.2652549) - - self.pvsystem = pvlib.pvsystem\ - .LocalizedPVSystem(location=test_location, - surface_tilt=20, - surface_azimuth=180, - module=module, - module_parameters=module_parameters, - racking_model='insulated_back', - module_type='glass_polymer', - modules_per_string=6) - - # define dummy energy data - energy_freq = 'MS' - energy_periods = 12 - energy_index = pd.date_range(start='2012-01-01', - periods=energy_periods, - freq=energy_freq) - - dummy_energy = np.repeat(a=100, repeats=energy_periods) - self.energy = pd.Series(dummy_energy, index=energy_index) - self.energy_periods = 12 - - # define dummy meteorological data - irrad_columns = ['DNI', 'GHI', 'DHI', 'Temperature', 'Wind Speed'] - irrad_freq = 'D' - irrad_index = pd.date_range(start=energy_index[0], - end=energy_index[-1] - pd.to_timedelta('1 nanosecond'), - freq=irrad_freq) - self.irrad = pd.DataFrame([[100, 45, 30, 25, 10]], - index=irrad_index, - columns=irrad_columns) - - # define an irregular pandas series - times = pd.DatetimeIndex(['2012-01-01 12:00', '2012-01-01 12:05', '2012-01-01 12:06', - '2012-01-01 12:09']) - data = [1, 2, 3, 4] - self.irregular_timeseries = pd.Series(data=data, index=times) - - def tearDown(self): - pass - - - @fail_on_rdtools_version('3.0.0') - def test_normalization_with_sapm(self): - ''' Test SAPM normalization. ''' - - sapm_kws = { - 'pvlib_pvsystem': self.pvsystem, - 'met_data': self.irrad, - } - - with pytest.warns(rdtoolsDeprecationWarning): - corr_energy, insol = normalize_with_sapm(self.energy, sapm_kws) - - # Test output is same frequency and length as energy - self.assertEqual(corr_energy.index.freq, self.energy.index.freq) - # Expected behavior is to have a nan at energy.index[0] - self.assertEqual(len(corr_energy.dropna()), len(self.energy)-1) - - # Test for valueError when energy frequency can't be inferred - with self.assertRaises(ValueError): - with pytest.warns(rdtoolsDeprecationWarning): - corr_energy, insolation = normalize_with_sapm(self.irregular_timeseries, sapm_kws) - - # TODO, test for: - # incorrect data format - # incomplete data - # missing pvsystem metadata - # missing measured irradiance data - # met_data freq > energy freq, issue/warining? 
- - -if __name__ == '__main__': - unittest.main() From 0f4d12c1d44ccfc71b201348935ac7bb41ebb0d3 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Tue, 25 Jun 2024 18:33:05 -0400 Subject: [PATCH 23/29] remove normalize_with_sapm from init --- docs/sphinx/source/api.rst | 1 - rdtools/__init__.py | 1 - 2 files changed, 2 deletions(-) diff --git a/docs/sphinx/source/api.rst b/docs/sphinx/source/api.rst index 86520dbd..bd23a4f5 100644 --- a/docs/sphinx/source/api.rst +++ b/docs/sphinx/source/api.rst @@ -127,7 +127,6 @@ Normalization irradiance_rescale normalize_with_expected_power normalize_with_pvwatts - normalize_with_sapm pvwatts_dc_power sapm_dc_power delta_index diff --git a/rdtools/__init__.py b/rdtools/__init__.py index 342427c2..c097e2be 100644 --- a/rdtools/__init__.py +++ b/rdtools/__init__.py @@ -1,4 +1,3 @@ -from rdtools.normalization import normalize_with_sapm from rdtools.normalization import normalize_with_pvwatts from rdtools.normalization import irradiance_rescale from rdtools.normalization import energy_from_power From 8d3e37c1898c7dfe9368b5b93c42901454eda539 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Wed, 26 Jun 2024 09:37:56 -0400 Subject: [PATCH 24/29] add deprecations to changelog --- docs/sphinx/source/changelog/pending.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/sphinx/source/changelog/pending.rst b/docs/sphinx/source/changelog/pending.rst index 7a5252ed..7cdcdd82 100644 --- a/docs/sphinx/source/changelog/pending.rst +++ b/docs/sphinx/source/changelog/pending.rst @@ -21,6 +21,11 @@ Requirements ------------ * Specified versions in ``requirements.txt`` and ``docs/notebook_requirements.txt`` have been updated (:pull:`412`) +Deprecations +------------ +* Removed :py:func:`~rdtools.normalization.sapm_dc_power` +* Removed :py:func:`~rdtools.normalization.normalize_with_sapm` + Contributors ------------ * Martin Springer (:ghuser:`martin-springer`) From 5d221951c2ecee0a263a462335b22364e96c4643 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Wed, 26 Jun 2024 09:39:22 -0400 Subject: [PATCH 25/29] remove unused pvlib import from normalization.py --- rdtools/normalization.py | 1 - 1 file changed, 1 deletion(-) diff --git a/rdtools/normalization.py b/rdtools/normalization.py index 456e4c2e..2509cd33 100644 --- a/rdtools/normalization.py +++ b/rdtools/normalization.py @@ -1,7 +1,6 @@ '''Functions for normalizing, rescaling, and regularizing PV system data.''' import pandas as pd -import pvlib import numpy as np from scipy.optimize import minimize import warnings From fc0ee0c65c024c84a3eb3e7fac53607190ad59a5 Mon Sep 17 00:00:00 2001 From: martin-springer Date: Wed, 26 Jun 2024 09:51:24 -0400 Subject: [PATCH 26/29] add pull request number to changelog --- docs/sphinx/source/changelog/pending.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sphinx/source/changelog/pending.rst b/docs/sphinx/source/changelog/pending.rst index 7cdcdd82..9a235ea5 100644 --- a/docs/sphinx/source/changelog/pending.rst +++ b/docs/sphinx/source/changelog/pending.rst @@ -23,8 +23,8 @@ Requirements Deprecations ------------ -* Removed :py:func:`~rdtools.normalization.sapm_dc_power` -* Removed :py:func:`~rdtools.normalization.normalize_with_sapm` +* Removed :py:func:`~rdtools.normalization.sapm_dc_power` (:pull:`419`) +* Removed :py:func:`~rdtools.normalization.normalize_with_sapm` (:pull:`419`) Contributors ------------ From 33176ae2af883c1744f876cd99dd248cd2cbfdde Mon Sep 17 00:00:00 2001 From: Michael Deceglie Date: Tue, 2 Jul 2024 10:39:38 
-0600 Subject: [PATCH 27/29] uncomment sensor_clearsky_filter block --- rdtools/analysis_chains.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/rdtools/analysis_chains.py b/rdtools/analysis_chains.py index 72863e46..6208e1e2 100644 --- a/rdtools/analysis_chains.py +++ b/rdtools/analysis_chains.py @@ -806,16 +806,15 @@ def _sensor_preprocess(self): "poa_global must be available to perform _sensor_preprocess" ) - # TODO: Ask Mike about this section - # if "sensor_clearsky_filter" in self.filter_params: - # try: - # if self.poa_global_clearsky is None: - # self._calc_clearsky_poa(model="isotropic") - # except AttributeError: - # raise AttributeError( - # "No poa_global_clearsky. 'set_clearsky' must be run " - # + "to allow filter_params['sensor_clearsky_filter']. " - # ) + if "sensor_clearsky_filter" in self.filter_params: + try: + if self.poa_global_clearsky is None: + self._calc_clearsky_poa(model="isotropic") + except AttributeError: + raise AttributeError( + "No poa_global_clearsky. 'set_clearsky' must be run " + + "to allow filter_params['sensor_clearsky_filter']. " + ) if self.power_expected is None: # Thermal details required if power_expected is not manually set. if self.temperature_cell is None and self.temperature_ambient is None: From 197a203a5d2a4caa882b2830103ff0f217f243e5 Mon Sep 17 00:00:00 2001 From: Michael Deceglie Date: Tue, 2 Jul 2024 12:59:09 -0600 Subject: [PATCH 28/29] remove blank line --- rdtools/analysis_chains.py | 1 - 1 file changed, 1 deletion(-) diff --git a/rdtools/analysis_chains.py b/rdtools/analysis_chains.py index 6208e1e2..ce08c137 100644 --- a/rdtools/analysis_chains.py +++ b/rdtools/analysis_chains.py @@ -539,7 +539,6 @@ def _call_clearsky_filter(filter_string): "sensor_clearsky_filter" ) - # note: the previous implementation using the & operator treated NaN # filter values as False, so we do the same here for consistency: filter_components = pd.DataFrame(filter_components).fillna(False) From 1a8b45e3efa4c6b40e1d02884fed743e00ba0106 Mon Sep 17 00:00:00 2001 From: Michael Deceglie Date: Tue, 2 Jul 2024 15:14:28 -0600 Subject: [PATCH 29/29] remove blank line from end of file --- rdtools/test/energy_from_power_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/rdtools/test/energy_from_power_test.py b/rdtools/test/energy_from_power_test.py index 2805555a..ee7a8561 100644 --- a/rdtools/test/energy_from_power_test.py +++ b/rdtools/test/energy_from_power_test.py @@ -124,4 +124,3 @@ def test_energy_from_power_series_index(): power = pd.Series([1, 2, 3, 4, 5]) with pytest.raises(ValueError): energy_from_power(power) -
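
For reference, code that previously relied on the removed sapm_dc_power() /
normalize_with_sapm() pair can call normalize_with_expected_power() directly,
as the deprecation notices in the patches above suggest. The sketch below is
illustrative only: the series values are synthetic, and in practice
power_expected and poa_global would come from a pvlib simulation of the
system rather than being hard-coded.

    import pandas as pd
    from rdtools.normalization import normalize_with_expected_power

    # Hourly, right-labeled series: metered energy (Wh), modeled/expected
    # power (W), and plane-of-array irradiance (W/m^2). Values are synthetic.
    index = pd.date_range('2024-01-01 09:00', periods=4, freq='h')
    energy = pd.Series([950.0, 1500.0, 1800.0, 1700.0], index=index)
    power_expected = pd.Series([1000.0, 1600.0, 1900.0, 1800.0], index=index)
    poa_global = pd.Series([400.0, 650.0, 800.0, 750.0], index=index)

    # pv_input='energy' mirrors how the removed normalize_with_sapm() forwarded
    # its inputs to normalize_with_expected_power(). For right-labeled input the
    # first normalized value is typically NaN (no preceding interval).
    energy_normalized, insolation = normalize_with_expected_power(
        energy, power_expected, poa_global, pv_input='energy')
    print(energy_normalized)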