
Commit

Merge branch 'master' of https://github.com/USEPA/StreamCat
TravisH18 committed Nov 6, 2024
2 parents 177246c + 1e3be3c commit 3e5262a
Showing 6 changed files with 1,995 additions and 59 deletions.
192 changes: 184 additions & 8 deletions ControlTable_StreamCat.csv

Large diffs are not rendered by default.

38 changes: 23 additions & 15 deletions PartitionDownscaledResults.py
@@ -22,28 +22,36 @@

 # Nutrient file
 #nut_dir = 'O:/PRIV/CPHEA/PESD/COR/CORFILES/Geospatial_Library_Projects/StreamCat/NutrientInventory/Inputs/'
-nut_dir = 'E:/WorkingData/To_Be_Flow_Accumulated/'
-nut = pd.read_csv(nut_dir + 'ClimTerms_2012_10.csv')
+# nut_dir = 'E:/WorkingData/To_Be_Flow_Accumulated/'
+# nut = pd.read_csv(nut_dir + 'ClimTerms_2012_10.csv')
+nut_dir = 'O:/PRIV/CPHEA/PESD/COR/CORFILES/Geospatial_Library_Projects/AmaliaHandler/'
+nut = pd.read_csv(nut_dir + 'ToBeFlowAccumulated_update.csv')
 
 cat_area = pd.read_csv('O:/PRIV/CPHEA/PESD/COR/CORFILES/Geospatial_Library_Projects/StreamCat/NutrientInventory/Inputs/COMID_Scaled_AgVars.csv')
 cat_area = cat_area[['COMID','CatAreaSqKm']]
 cat_area.head()
 # add VPU using lookup table
 nut = pd.merge(nut, COMID_VPU, how='left', left_on=['COMID'], right_on=['COMID'])
 nut = pd.merge(nut, cat_area, how='left', left_on=['COMID'], right_on=['COMID'])
-nut = nut.drop('Unnamed: 0', axis=1)
+# nut = nut.drop('Unnamed: 0', axis=1)
+# nut = nut.drop('...1', axis=1)
 list(nut)
 
 # select columns - this part we can modify to iterate through columns
-final = nut[['COMID', 'SNOW_YrMean', 'CatAreaSqKm', 'VPU']]
-final = final.rename(columns={'SNOW_YrMean': 'CatSum'})
-final['CatCount'] = final['CatAreaSqKm']
-final['CatPctFull'] = 100
-final = final.set_axis(['COMID', 'CatSum', 'CatAreaSqKm','VPU', 'CatCount', 'CatPctFull'], axis=1)
-
-for i in VPU:
-    print(i)
-    df = final[final['VPU'] == i]
-    df = df.drop(columns=['VPU'])
-    df.to_csv(nut_dir + '/Allocation_and_Accumulation/SNOW_YrMean_' + str(i) + '.csv',
-              index=False)
+nut.columns = nut.columns.str.replace('_Cat','')
+cols = [i for i in nut.columns if i not in ["COMID", "VPU", "CatAreaSqKm"]]
+
+for col in cols:
+    final = nut[['COMID', col, 'CatAreaSqKm', 'VPU']]
+    final = final.rename(columns={col: 'CatSum'})
+    final['CatCount'] = final['CatAreaSqKm']
+    final['CatSum'] = final['CatSum'] * final['CatCount']
+    final['CatPctFull'] = 100
+    final = final[['COMID', 'CatAreaSqKm', 'CatCount', 'CatSum', 'CatPctFull', 'VPU']]
+
+    for i in VPU:
+        print(i)
+        df = final[final['VPU'] == i]
+        df = df.drop(columns=['VPU'])
+        df.to_csv(nut_dir + '/Allocation_and_Accumulation/' + col + '_' + str(i) + '.csv',
+                  index=False)
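
For context, the rewritten block generalizes the old single-variable (SNOW_YrMean) export: it strips the '_Cat' suffix from column names, multiplies each per-catchment value by catchment area (turning means into area-weighted sums), and writes one allocation CSV per variable and per VPU. Below is a minimal, self-contained sketch of that partitioning pattern; the DataFrame contents and output paths are hypothetical, and only the column names and transformations mirror the script:

import pandas as pd

# Hypothetical input with the columns the script expects after its merges.
nut = pd.DataFrame({
    "COMID": [101, 102, 103],
    "VPU": ["01", "01", "02"],
    "CatAreaSqKm": [2.5, 4.0, 1.2],
    "SNOW_YrMean_Cat": [10.0, 20.0, 5.0],
})

nut.columns = nut.columns.str.replace("_Cat", "")
cols = [c for c in nut.columns if c not in ("COMID", "VPU", "CatAreaSqKm")]

for col in cols:
    final = nut[["COMID", col, "CatAreaSqKm", "VPU"]].rename(columns={col: "CatSum"})
    final["CatCount"] = final["CatAreaSqKm"]                # area serves as the count/weight
    final["CatSum"] = final["CatSum"] * final["CatCount"]   # per-catchment mean -> area-weighted sum
    final["CatPctFull"] = 100
    final = final[["COMID", "CatAreaSqKm", "CatCount", "CatSum", "CatPctFull", "VPU"]]
    for vpu, df in final.groupby("VPU"):
        # One CSV per variable and VPU, mirroring the Allocation_and_Accumulation layout.
        df.drop(columns=["VPU"]).to_csv(f"{col}_{vpu}.csv", index=False)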
72 changes: 36 additions & 36 deletions StreamCat.py
@@ -113,34 +113,34 @@
end="",
flush=True,
)
# for zone, hydroregion in INPUTS.items():
# if not os.path.exists(f"{OUT_DIR}/{row.FullTableName}_{zone}.csv"):
# print(zone, end=", ", flush=True)
# pre = f"{NHD_DIR}/NHDPlus{hydroregion}/NHDPlus{zone}"
# if not row.accum_type == "Point":
# izd = (
# f"{mask_dir}/{zone}.tif"
# if mask_dir
# else f"{pre}/NHDPlusCatchment/cat"
# )
# cat = createCatStats(
# row.accum_type,
# layer,
# izd,
# OUT_DIR,
# zone,
# row.by_RPU,
# mask_dir,
# NHD_DIR,
# hydroregion,
# apm,
# )
# if row.accum_type == "Point":
# izd = f"{pre}/NHDPlusCatchment/Catchment.shp"
# cat = PointInPoly(
# points, zone, izd, pct_full, mask_dir, apm, summary
# )
# cat.to_csv(f"{OUT_DIR}/{row.FullTableName}_{zone}.csv", index=False)
for zone, hydroregion in INPUTS.items():
if not os.path.exists(f"{OUT_DIR}/{row.FullTableName}_{zone}.csv"):
print(zone, end=", ", flush=True)
pre = f"{NHD_DIR}/NHDPlus{hydroregion}/NHDPlus{zone}"
if not row.accum_type == "Point":
izd = (
f"{mask_dir}/{zone}.tif"
if mask_dir
else f"{pre}/NHDPlusCatchment/cat"
)
cat = createCatStats(
row.accum_type,
layer,
izd,
OUT_DIR,
zone,
row.by_RPU,
mask_dir,
NHD_DIR,
hydroregion,
apm,
)
if row.accum_type == "Point":
izd = f"{pre}/NHDPlusCatchment/Catchment.shp"
cat = PointInPoly(
points, zone, izd, pct_full, mask_dir, apm, summary
)
cat.to_csv(f"{OUT_DIR}/{row.FullTableName}_{zone}.csv", index=False)
print("done!")
print("Accumulating...", end="", flush=True)
for zone in INPUTS:
@@ -184,11 +184,11 @@
             final = pd.merge(cat, upFinal, on="COMID")
             final.to_csv(f"{OUT_DIR}/{row.FullTableName}_{zone}.csv", index=False)
         print(end="") if processed else print("done!")
-        if already_processed:
-            print(
-                "\n!!!Processing Problem!!!\n\n"
-                f"{', '.join(already_processed)} already run!\n"
-                "Be sure to delete the associated files in your `OUTDIR` to rerun:"
-                f"\n\t> {OUT_DIR}\n\n!!! `$OUT_DIR/DBF_stash/*` "
-                f"output used in 'Continuous' and 'Categorical' metrics!!!"
-            )
+    if already_processed:
+        print(
+            "\n!!!Processing Problem!!!\n\n"
+            f"{', '.join(already_processed)} already run!\n"
+            "Be sure to delete the associated files in your `OUTDIR` to rerun:"
+            f"\n\t> {OUT_DIR}\n\n!!! `$OUT_DIR/DBF_stash/*` "
+            f"output used in 'Continuous' and 'Categorical' metrics!!!"
+        )
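
The re-enabled loop above is what makes reruns resumable: a zone's output table is computed only if its CSV does not already exist, and the already_processed warning tells the user which outputs to delete before rerunning. A condensed sketch of that guard pattern, with hypothetical paths and a stubbed compute step (not the script's exact bookkeeping):

import os

OUT_DIR = "./out"  # hypothetical output directory
already_processed = []

for zone in ["01", "02", "03"]:  # stand-in for the INPUTS zone keys
    out_csv = f"{OUT_DIR}/MyMetric_{zone}.csv"
    if os.path.exists(out_csv):
        # Skip work that has already been written out.
        already_processed.append(f"MyMetric_{zone}")
        continue
    # ... compute catchment stats for this zone, then write out_csv ...

if already_processed:
    print(
        "!!!Processing Problem!!! "
        f"{', '.join(already_processed)} already run! "
        f"Delete those files under {OUT_DIR} to rerun."
    )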