Skip to content

Commit

Permalink
Merge pull request #362 from NREL/pandas_200
Browse files Browse the repository at this point in the history
Fixes for pandas 2.0.0 compatibility
  • Loading branch information
mdeceglie authored May 16, 2023
2 parents e06b65f + 8aa7733 commit d19fa83
Show file tree
Hide file tree
Showing 8 changed files with 25 additions and 11 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/pytest.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -43,4 +43,4 @@ jobs:
pip install ${{ matrix.env }}
- name: Test with pytest ${{ matrix.env }}
run: |
pytest
pytest
1 change: 1 addition & 0 deletions docs/sphinx/source/changelog.rst
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
RdTools Change Log
==================
.. include:: changelog/v2.1.5.rst
.. include:: changelog/v2.1.4.rst
.. include:: changelog/v2.1.3.rst
.. include:: changelog/v2.1.2.rst
Expand Down
13 changes: 13 additions & 0 deletions docs/sphinx/source/changelog/v2.1.5.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
*************************
v2.1.5 (May 16, 2023)
*************************

Bug Fixes
---------
* Add support for pandas 2.0 (:issue:`361`, :pull:`362`)


Contributors
------------
* Kevin Anderson (:ghuser:`kanderso-nrel`)
* Michael Deceglie (:ghuser:`mdeceglie`)
6 changes: 3 additions & 3 deletions rdtools/degradation.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def degradation_ols(energy_normalized, confidence_level=68.2):

# calculate a years column as x value for regression, ignoring leap years
day_diffs = (df.index - df.index[0])
df['days'] = day_diffs.astype('timedelta64[s]') / (60 * 60 * 24)
df['days'] = day_diffs / pd.Timedelta('1d')
df['years'] = df.days / 365.0

# add intercept-constant to the exogeneous variable
Expand Down Expand Up @@ -121,7 +121,7 @@ def degradation_classical_decomposition(energy_normalized,

# calculate a years column as x value for regression, ignoring leap years
day_diffs = (df.index - df.index[0])
df['days'] = day_diffs.astype('timedelta64[s]') / (60 * 60 * 24)
df['days'] = day_diffs / pd.Timedelta('1d')
df['years'] = df.days / 365.0

# Compute yearly rolling mean to isolate trend component using
Expand Down Expand Up @@ -266,7 +266,7 @@ def degradation_year_on_year(energy_normalized, recenter=True,
tolerance=pd.Timedelta('8D')
)

df['time_diff_years'] = (df.dt - df.dt_right).astype('timedelta64[h]') / 8760.0
df['time_diff_years'] = (df.dt - df.dt_right) / pd.Timedelta('365d')
df['yoy'] = 100.0 * (df.energy - df.energy_right) / (df.time_diff_years)
df.index = df.dt

Expand Down
8 changes: 4 additions & 4 deletions rdtools/filtering.py
Original file line number Diff line number Diff line change
Expand Up @@ -424,8 +424,9 @@ def logic_clip_filter(power_ac,
# series sampling frequency is less than 95% consistent.
_check_data_sampling_frequency(power_ac)
# Get the sampling frequency of the time series
time_series_sampling_frequency = power_ac.index.to_series().diff()\
.astype('timedelta64[m]').mode()[0]
time_series_sampling_frequency = (
power_ac.index.to_series().diff() / pd.Timedelta('60s')
).mode()[0]
# Make copies of the original inputs for the cases where the data is
changed for clipping evaluation
original_time_series_sampling_frequency = time_series_sampling_frequency
Expand Down Expand Up @@ -651,8 +652,7 @@ def xgboost_clip_filter(power_ac,
# series sampling frequency is less than 95% consistent.
_check_data_sampling_frequency(power_ac)
# Get the most common sampling frequency
sampling_frequency = int(power_ac.index.to_series().diff()
.astype('timedelta64[m]').mode()[0])
sampling_frequency = int((power_ac.index.to_series().diff() / pd.Timedelta('60s')).mode()[0])
freq_string = str(sampling_frequency) + "T"
# Min-max normalize
# Resample the series based on the most common sampling frequency
Expand Down
2 changes: 1 addition & 1 deletion rdtools/test/availability_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ def difficult_data():

# generate a plausible clear-sky power signal
times = pd.date_range('2019-01-01', '2019-01-06', freq='15min',
tz='US/Eastern', closed='left')
tz='US/Eastern')
location = pvlib.location.Location(40, -80)
clearsky = location.get_clearsky(times, model='haurwitz')
# just scale GHI to power for simplicity
Expand Down
2 changes: 1 addition & 1 deletion rdtools/test/degradation_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def get_corr_energy(cls, rd, input_freq):
freq = input_freq

x = pd.date_range(start=start, end=end, freq=freq)
day_deltas = (x - x[0]).astype('timedelta64[s]') / (60.0 * 60.0 * 24)
day_deltas = (x - x[0]) / pd.Timedelta('1d')
noise = (np.random.rand(len(day_deltas)) - 0.5) / 1e3

y = 1 + daily_rd * day_deltas + noise
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
'pytest >= 3.6.3',
'coverage',
'flake8',
'nbval',
'nbval==0.9.6', # https://github.com/computationalmodelling/nbval/issues/194
'pytest-mock',
]

Expand Down

0 comments on commit d19fa83

Please sign in to comment.