atcbias_apply_matrix.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to apply matrix correction for Average then Clip (AtC) bias
"""
import os
import re
import numpy as np
import pandas as pd
import pvlib
"""
BSD 3-Clause License
Copyright (c) 2024 Allen Analytics LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = "Jonathan Allen"
__copyright__ = "Copyright 2024, Allen Analytics LLC"
__license__ = "BSD"
__email__ = "[email protected]"
"""
Project funded by EPRI, see ***
"""
"""
Use this script to estimate hourly corrections for the Average then Clip (AtC)
bias using the method described in [1].
The user provides three CSV files containing hourly PV modeling inputs and
outputs. The user enters the file names in the script below.
The CSV files are typically created using PV modeling tools, e.g. SAM or PVSyst;
they must contain:
- Hourly modeling using plant specifications and realistic meteorological data
o time stamp
o DNI (W/m2) (or GHI and DHI for PVSyst)
o Pac
- Hourly modeling using dc:ac ratio = 1.0 and realistic meteorological data
o time stamp
o Pac
- Hourly modeling using plant specifications and reference sky radiation
o time stamp
o DNI (W/m2) (or GHI and DHI for PVSyst)
o Pdc
The CSV files are joined by row number (SAM) or time stamp (PVSyst), so the
rows must be consistent across the files. Units of power must also be
consistent across the CSV files. The hourly modeling with dc:ac ratio = 1.0 is
generated by increasing the number of modules.
The user also provides a file with an AtC Bias correction matrix [1] which
contains average scaled AtC Bias (delta) for combinations of radiation index
(gamma) and clipping potential.
The script outputs an estimate of the annual AtC Bias and a CSV file with the
time series of the average scaled AtC Bias. The rows of the output CSV file
match those of the input files.
Note that the hourly estimates of average scaled AtC Bias are expected to
match the annual (and perhaps month-hour) average AtC Bias, but are not
expected to match the hourly values. See [1] for a detailed explanation.
References
----------
.. [1] J. O. Allen, W. B. Hobbs, and M. Bolen, "Classification Method to
Predict the Effect of Short-Term Inverter Saturation on PV Performance
Modeling", PV Performance Modeling Workshop Salt Lake City, 2022.
"""
# ------------------------------------------------------------------------------
# User Inputs
# ------------------------------------------------------------------------------
project_dir = r"C:\a2\epri_atcbias3\data\rabin"
# dc:ac ratio of plant specifications; see note below.
dc_ac_ratio = 1.4
# Format of PV modeling files
# For PVSyst, modify the col_dict mappings in read_pvsyst_csv(); for SAM, in read_sam_csv()
pv_model = "PVSyst"
# Hourly modeling using plant specifications and realistic meteorological data
pac_csv = os.path.join(project_dir, "PVModel_PVSyst_1.4.csv")
# Hourly modeling using dc:ac ratio = 1.0 and realistic meteorological data
pac_dcac1_csv = os.path.join(project_dir, "PVModel_PVSyst_1.csv")
# Hourly modeling using plant specifications and reference sky radiation
pac_refsky_csv = os.path.join(project_dir, "PVModel_PVSyst_refsky.csv")
# AtC Bias Correction Matrix
matrix_fn = os.path.join(project_dir, "method2.csv")
# Output CSV file name
output_csv = os.path.join(project_dir, "atc_bias_output.csv")
# ------------------------------------------------------------------------------
# Utility Functions
# ------------------------------------------------------------------------------
def read_limits(limit_lines):
    # Convert a text list of bin limits to a list of bin edges
    # So for
    #     limit_lines = ['0 - 1', '1 - 5', '5 - 20']
    #     limit_list = [0, 1, 5, 20]
    limit_list = []
    n = []
    for limit_line in limit_lines:
        n = re.findall(r"[+-]?\d*\.*\d+", limit_line)
        # Append the lower edge of each bin ...
        limit_list.append(float(n[0]))
    # ... and close the final bin with the upper edge of the last line, so the
    # edges are unique and suitable for pd.cut()
    limit_list.append(float(n[1]))
    return limit_list
# ------------------------------------------------------------------------------
# Read Input Files from SAM
# ------------------------------------------------------------------------------
def read_sam_one_csv(csv_fn, col_dict):
csv_df = pd.read_csv(csv_fn)
csv_df = csv_df.rename(columns=col_dict)
csv_df = csv_df.drop(columns=csv_df.columns.difference(col_dict.values()))
return csv_df
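# Note: the col_dict keys in read_sam_csv() below are assumed to match the
# column headers of the SAM hourly CSV export; adjust them if your SAM export
# uses different headers.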
def read_sam_csv(pac_csv, pac_dcac1_csv, pac_refsky_csv):
# Pac and DNI for plant specifications
col_dict = {
"Irradiance DNI from weather file | (W/m2)": "dni",
"Inverter AC output power | (kW)": "pac",
}
df = read_sam_one_csv(pac_csv, col_dict)
# Pac for dc:ac ratio = 1.0
col_dict = {
"Inverter AC output power | (kW)": "pac_dcac1",
}
df2 = read_sam_one_csv(pac_dcac1_csv, col_dict)
df = df.join(df2)
# Pdc and DNI for reference sky
col_dict = {
"Irradiance DNI from weather file | (W/m2)": "dni_refsky",
"Inverter DC input power | (kW)": "pdc_refsky",
}
df2 = read_sam_one_csv(pac_refsky_csv, col_dict)
df = df.join(df2)
return df
# ------------------------------------------------------------------------------
# Read Input Files from PVSyst
# ------------------------------------------------------------------------------
def read_pvsyst_one_csv(csv_fn):
# Read column names on row 11
skip_rows = 10
encoding = "latin-1"
csv_df1 = pd.read_csv(csv_fn, skiprows=skip_rows, encoding=encoding)
# Read data starting on row 13
skip_rows = 13
encoding = "latin-1"
csv_df2 = pd.read_csv(csv_fn, skiprows=skip_rows, encoding=encoding,
header=None, names=csv_df1.columns)
csv_df2.index = pd.to_datetime(csv_df2['date'], dayfirst=True, format='mixed')
return csv_df2
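# Note: the skiprows values in read_pvsyst_one_csv() assume a particular PVSyst
# hourly CSV layout (column names on row 11, data rows shortly after); adjust
# them if your export has a different number of header lines.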
def read_pvsyst_csv(pac_csv, pac_dcac1_csv, pac_refsky_csv):
# Expected columns of data (not all are needed for each of the 3 csv files)
col_dict = {'GlobHor': 'ghi', 'DiffHor': 'dhi', 'HSol': 'solar_elevation',
'EArray': 'pdc', 'EOutInv': 'pac'}
# Pac and DNI for plant specifications
    # Required columns - 'date', 'solar_elevation', 'ghi', 'dhi', 'pac'
df = read_pvsyst_one_csv(pac_csv)
df = df.rename(columns=col_dict)
df = df.sort_index()
# Calculate DNI as (GHI - DHI) / cos(zenith)
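    # cos(90 - elevation) = cos(zenith), so this implements the closure relation
    # GHI = DHI + DNI * cos(zenith) rearranged for DNI.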
df["dni"] = (df["ghi"] - df["dhi"]) / pvlib.tools.cosd(90 - df["solar_elevation"])
# Pac for dc:ac ratio = 1.0
# Required columns - 'date', 'pac'
df2 = read_pvsyst_one_csv(pac_dcac1_csv)
df2 = df2.rename(columns=col_dict)
df = df.join(df2, rsuffix='_dcac1')
# Pdc and DNI for reference sky
# Required columns - 'date', 'ghi', 'dhi', 'pdc'
df2 = read_pvsyst_one_csv(pac_refsky_csv)
df2 = df2.rename(columns=col_dict)
df = df.join(df2, rsuffix='_refsky')
# Calculate DNI as (GHI - DHI) / cos(zenith)
df["dni_refsky"] = (df["ghi_refsky"] - df["dhi_refsky"]) / pvlib.tools.cosd(90 - df["solar_elevation"])
return df
# ------------------------------------------------------------------------------
# Read Input Data from CSV Files
# ------------------------------------------------------------------------------
match pv_model:
case "SAM":
pac_df = read_sam_csv(pac_csv, pac_dcac1_csv, pac_refsky_csv)
case "PVSyst":
pac_df = read_pvsyst_csv(pac_csv, pac_dcac1_csv, pac_refsky_csv)
case _:
        pac_df = pd.DataFrame(columns=["ghi", "dhi", "pac", "pac_dcac1",
                                       "ghi_refsky", "dhi_refsky", "pac_refsky"])
# ------------------------------------------------------------------------------
# Read Correction Matrix
# ------------------------------------------------------------------------------
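# Expected layout of the correction matrix CSV (illustrative bin limits and
# delta values only; use the matrix supplied with the method, e.g. method2.csv):
# the first column lists the gamma bin limits as text, the remaining column
# headers list the clipping potential (CP) bin limits as text, and each cell
# holds the average scaled AtC Bias (delta) for that gamma/CP bin, e.g.
#
#   gamma bins,  0 - 0.1,  0.1 - 0.2
#   0 - 0.5,     0.0001,   0.0005
#   0.5 - 1.0,   0.0002,   0.0010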
matrix_df = pd.read_csv(matrix_fn)
cp_limit_headers = matrix_df.iloc[:, 1:].columns
cp_limits = read_limits(cp_limit_headers)
gamma_limit_lines = matrix_df.iloc[:, 0]
gamma_limits = read_limits(gamma_limit_lines)
corr_matrix = matrix_df.iloc[:, 1:]
# ------------------------------------------------------------------------------
# Calculate gamma and CP
# ------------------------------------------------------------------------------
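# gamma (radiation index): ratio of the modeled DNI to the reference sky DNI
# for the same hour [1].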
pac_df["gamma"] = pac_df["dni"] / pac_df["dni_refsky"]
# Estimate the inverter AC capacity from the maximum modeled hourly output.
# This estimate is only valid for installations with dc:ac ratio > 1.0.
# The user can also enter the actual value here.
pac0 = pac_df["pac"].max()
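# cp (clipping potential): reference sky DC power in excess of the inverter AC
# capacity, normalized by that capacity [1]; cp > 0 flags hours where clipping
# is possible.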
pac_df["cp"] = (pac_df["pdc_refsky"] - pac0) / pac0
# ------------------------------------------------------------------------------
# Create AtC Bias Correction Time Series
# ------------------------------------------------------------------------------
# Find the gamma and CP bins for each hour
gamma_bin = pd.cut(pac_df.gamma, gamma_limits, labels=False).fillna(-1).astype("int")
cp_bin = pd.cut(pac_df.cp, cp_limits, labels=False).fillna(-1).astype("int")
# Fill in the estimated correction for each hour
correction = np.zeros(len(pac_df))
# Use for loop to fill correction values from matrix with these complications:
# - bin indices may be invalid (-1)
# - no corrections for first and last hour of PV output (hours of sunrise and sunset)
for i in range(len(pac_df)):
# Fill corrections from matrix
    # Use positional indexing because the index may be a DatetimeIndex (PVSyst)
    if gamma_bin.iloc[i] >= 0 and cp_bin.iloc[i] >= 0:
        correction[i] = corr_matrix.iloc[gamma_bin.iloc[i], cp_bin.iloc[i]]
# Exclude hours of sunrise and sunset
if 0 < i < (len(pac_df) - 1):
        if np.isnan(pac_df["pac"].iloc[i - 1]) or np.isnan(pac_df["pac"].iloc[i + 1]):
correction[i] = 0
# Scale correction by dc:ac ratio because correction matrix was developed by
# scaling inverter _down_ whereas dc output using PV models is calculated by
# scaling modules _up_. Remove '* dc_ac_ratio' in next line if dc output
# generated by scaling inverter down.
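# For example, with dc_ac_ratio = 1.4 a matrix value of 0.0010 becomes a scaled
# hourly correction of 0.0014.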
pac_df["bias_correction"] = correction * dc_ac_ratio
pac_df["pac_bias_Correction"] = correction * pac_df["pac_dcac1"].sum()
# ------------------------------------------------------------------------------
# Output Annual AtC Bias and Correction Time Series
# ------------------------------------------------------------------------------
n_years = len(pac_df) / 8760
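# The division by 8760 assumes complete years of hourly records.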
print(f'Annual AtC Bias = {pac_df["bias_correction"].sum() / n_years * 100:.2f}%')
pac_df.to_csv(output_csv)