diff --git a/--algorithm b/--algorithm
new file mode 100644
index 0000000..e69de29
diff --git a/hydromodel/app/show_results.ipynb b/hydromodel/app/show_results.ipynb
new file mode 100644
index 0000000..2948514
--- /dev/null
+++ b/hydromodel/app/show_results.ipynb
@@ -0,0 +1,236 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 分析结果\n",
+ "\n",
+ "这是一个用于分析模型率定后测试结果的 Jupyter Notebook。读取各个exp下面的结果文件看看。"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Please Check your directory:\n",
+ "ROOT_DIR of the repo: d:\\code\\hydro-model-xaj\n",
+ "DATASET_DIR of the repo: d:\\data\n"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "import sys\n",
+ "from pathlib import Path\n",
+ "\n",
+ "sys.path.append(os.path.dirname(Path(os.path.abspath('')).parent))\n",
+ "import definitions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "metric_mean_file = Path(os.path.join(\"D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/模型运行/basins_test_metrics_mean_all_cases.csv\"))\n",
+ "metric_median_file = Path(os.path.join(\"D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/模型运行/basins_test_metrics_median_all_cases.csv\"))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "metric_mean = pd.read_csv(metric_mean_file, index_col=0)\n",
+ "metric_median = pd.read_csv(metric_median_file, index_col=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " Bias | \n",
+ " RMSE | \n",
+ " ubRMSE | \n",
+ " Corr | \n",
+ " R2 | \n",
+ " NSE | \n",
+ " KGE | \n",
+ " FHV | \n",
+ " FLV | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " HFsources | \n",
+ " -10.431724 | \n",
+ " 84.941974 | \n",
+ " 84.29898 | \n",
+ " 0.625164 | \n",
+ " 0.380791 | \n",
+ " 0.380791 | \n",
+ " 0.308037 | \n",
+ " -30.076788 | \n",
+ " -100.0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " Bias RMSE ubRMSE Corr R2 NSE \\\n",
+ "HFsources -10.431724 84.941974 84.29898 0.625164 0.380791 0.380791 \n",
+ "\n",
+ " KGE FHV FLV \n",
+ "HFsources 0.308037 -30.076788 -100.0 "
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "metric_mean"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " Bias | \n",
+ " RMSE | \n",
+ " ubRMSE | \n",
+ " Corr | \n",
+ " R2 | \n",
+ " NSE | \n",
+ " KGE | \n",
+ " FHV | \n",
+ " FLV | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " HFsources | \n",
+ " -10.431724 | \n",
+ " 84.941974 | \n",
+ " 84.29898 | \n",
+ " 0.625164 | \n",
+ " 0.380791 | \n",
+ " 0.380791 | \n",
+ " 0.308037 | \n",
+ " -30.076788 | \n",
+ " -100.0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " Bias RMSE ubRMSE Corr R2 NSE \\\n",
+ "HFsources -10.431724 84.941974 84.29898 0.625164 0.380791 0.380791 \n",
+ "\n",
+ " KGE FHV FLV \n",
+ "HFsources 0.308037 -30.076788 -100.0 "
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "metric_median"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3.9.7 ('xaj')",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ },
+ "orig_nbformat": 4,
+ "vscode": {
+ "interpreter": {
+ "hash": "5ff2b0240d3185dc85fb5f0a6365eefe977ea7c2afca8c64838dc9fcf4f02a96"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/hydromodel/calibrate/calibrate_sceua.py b/hydromodel/calibrate/calibrate_sceua.py
index e65f818..0c3f507 100644
--- a/hydromodel/calibrate/calibrate_sceua.py
+++ b/hydromodel/calibrate/calibrate_sceua.py
@@ -18,7 +18,7 @@ def __init__(
warmup_length=365,
model={
"name": "xaj_mz",
- "source_type": "sources",
+ "source_type": "sources5mm",
"source_book": "HF",
},
obj_func=None,
@@ -128,44 +128,44 @@ def objectivefunction(
float
likelihood
"""
- # 切片
- # time = pd.read_excel('/home/ldaning/code/biye/hydro-model-xaj/hydromodel/example/zhandian/洪水率定时间1.xlsx')
- # calibrate_starttime = pd.to_datetime("2014-7-10 0:00")
- # calibrate_endtime = pd.to_datetime("2020-6-24 0:00")
- # total = 0
- # count = 0
- # for i in range(len(time)):
- # if(time.iloc[i,0]>>>>>> wangjingyi1999-event
best_model_run = results[bestindex]
- fields = [word for word in best_model_run.dtype.names if word.startswith("par")]
+ fields = [word for word in best_model_run.dtype.names if word.startswith("par")]
best_calibrate_params = pd.DataFrame(list(best_model_run[fields]))
save_file = os.path.join(save_dir, basin_id + "_calibrate_params.txt")
best_calibrate_params.to_csv(save_file, sep=",", index=False, header=True)
 return np.array(best_calibrate_params).reshape(1, -1)  # return the best parameter set as one row
def summarize_parameters(result_dir, model_info: dict):
@@ -214,8 +222,15 @@ def read_and_save_et_ouputs(result_dir, fold: int):
)
train_period = data_info_train["time"]
test_period = data_info_test["time"]
- train_np_file = os.path.join(exp_dir, "data_info_fold" + str(fold) + "_train.npy")
- test_np_file = os.path.join(exp_dir, "data_info_fold" + str(fold) + "_test.npy")
+ # TODO: the basins_lump_p_pe_q_fold file name needs to be unified
+ train_np_file = os.path.join(
+ exp_dir, "data_info_fold" + str(fold) + "_train.npy"
+ )
+ test_np_file = os.path.join(
+ exp_dir, "data_info_fold" + str(fold) + "_test.npy"
+ )
+ # train_np_file = os.path.join(exp_dir, f"basins_lump_p_pe_q_fold{fold}_train.npy")
+ # test_np_file = os.path.join(exp_dir, f"basins_lump_p_pe_q_fold{fold}_test.npy")
train_data = np.load(train_np_file)
test_data = np.load(test_np_file)
es_test = []
@@ -256,7 +271,7 @@ def read_and_save_et_ouputs(result_dir, fold: int):
# "exp61561",
# "Dec08_11-38-48_LAPTOP-DNQOPPMS_fold1_HFsourcesrep1000ngs1000",
)
- read_and_save_et_ouputs(one_model_one_hyperparam_setting_dir, fold=1)
+ read_and_save_et_ouputs(one_model_one_hyperparam_setting_dir, fold=0)
# summarize_parameters(one_model_one_hyperparam_setting_dir, {"name": "xaj_mz"})
# renormalize_params(one_model_one_hyperparam_setting_dir, {"name":"xaj_mz"})
# summarize_metrics(one_model_one_hyperparam_setting_dir,{"name":"xaj_mz"})
diff --git a/hydromodel/models/model_config.py b/hydromodel/models/model_config.py
index 4e09939..f307600 100644
--- a/hydromodel/models/model_config.py
+++ b/hydromodel/models/model_config.py
@@ -1,7 +1,7 @@
"""
Author: Wenyu Ouyang
Date: 2022-10-25 21:16:22
-LastEditTime: 2022-12-25 16:06:05
+LastEditTime: 2024-03-21 20:12:31
LastEditors: Wenyu Ouyang
Description: some basic config for hydro-model-xaj models
FilePath: \hydro-model-xaj\hydromodel\models\model_config.py
@@ -77,9 +77,9 @@
],
"param_range": OrderedDict(
{
- "K": [0.1, 1.0],
- "B": [0.1, 0.4],
- "IM": [0.01, 0.1],
+ "K": [0.5, 1.0],
+ "B": [0.2, 0.4],
+ "IM": [0.07, 0.1],
"UM": [0.0, 20.0],
"LM": [60.0, 90.0],
"DM": [60.0, 120.0],
@@ -93,6 +93,23 @@
"CI": [0.0, 0.9],
"CG": [0.98, 0.998],
"KERNEL": [1, 15],
+
+ # "K": [0.1, 1.0],
+ # "B": [0.1, 0.4],
+ # "IM": [0.01, 0.1],
+ # "UM": [0.0, 20.0],
+ # "LM": [60.0, 90.0],
+ # "DM": [60.0, 120.0],
+ # "C": [0.0, 0.2],
+ # "SM": [5, 10],
+ # "EX": [1.0, 1.5],
+ # "KI": [0.0, 0.7],
+ # "KG": [0.0, 0.7],
+ # "A": [0.0, 2.9],
+ # "THETA": [0.0, 6.5],
+ # "CI": [0.0, 0.9],
+ # "CG": [0.98, 0.998],
+ # "KERNEL": [1, 15],
}
),
},
diff --git a/hydromodel/utils/plots.py b/hydromodel/utils/plots.py
index 03cc2e4..1e942ab 100644
--- a/hydromodel/utils/plots.py
+++ b/hydromodel/utils/plots.py
@@ -1,7 +1,7 @@
"""
Author: Wenyu Ouyang
Date: 2022-10-25 21:16:22
-LastEditTime: 2024-03-21 18:41:51
+LastEditTime: 2024-03-21 20:07:52
LastEditors: Wenyu Ouyang
Description: Plots for calibration and testing results
FilePath: \hydro-model-xaj\hydromodel\utils\plots.py
@@ -99,7 +99,6 @@ def show_calibrate_result(
None
"""
# Load the results gained with the sceua sampler, stored in SCEUA_xaj.csv
-
# results = []
# for chunk in pd.read_csv(sceua_calibrated_file, chunksize=100000 ):
# results.append(chunk)
@@ -157,35 +156,79 @@ def show_calibrate_result(
stat_error, os.path.join(save_dir, "train_metrics.json")
)
- # 循还画图
- # time = pd.read_excel('/home/ldaning/code/biye/hydro-model-xaj/hydromodel/example/zhandian/洪水率定时间1.xlsx')
- # calibrate_starttime = pd.to_datetime("2014-7-10 0:00")
- # calibrate_endtime = pd.to_datetime("2020-6-24 0:00")
- # basin_area = float(basin_area)
- # best_simulation = [x * (basin_area*1000000/1000/3600) for x in best_simulation]
- # obs = [x * (basin_area*1000000/1000/3600) for x in spot_setup.evaluation()]
+ # loop over the flood events and plot each one
+ time = pd.read_excel('D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/站点信息/洪水率定时间.xlsx')
+ calibrate_starttime = pd.to_datetime("2012-06-10 0:00:00")
+ calibrate_endtime = pd.to_datetime("2019-12-31 23:00:00")
+ basin_area = float(basin_area)
+ best_simulation = [x * (basin_area*1000000/1000/3600) for x in best_simulation]
+ obs = [x * (basin_area*1000000/1000/3600) for x in spot_setup.evaluation()]
+ time['starttime']=pd.to_datetime(time['starttime'])
+ time['endtime']=pd.to_datetime(time['endtime'])
+ Prcp_list=[]
+ W_obs_list=[]
+ W_sim_list=[]
+ W_bias_abs_list=[]
+ W_bias_rela_list=[]
+ Q_max_obs_list=[]
+ Q_max_sim_list=[]
+ Q_bias_rela_list=[]
+ time_bias_list=[]
+ DC_list=[]
+ ID_list=[]
+ for i, row in time.iterrows():
# for i in range(len(time)):
- ## if(time.iloc[i,0]=0):
+ numerator+=pow(abs(mol[i])-obs[i],2)
+ meangauge+=obs[i]
+ count+=1
+ meangauge=meangauge/count
+ for i in range(len(obs)):
+ if (obs[i]>=0):
+ denominator+=pow(obs[i]-meangauge,2)
+ return 1-numerator/denominator
\ No newline at end of file
diff --git a/hydromodel/utils/units.py b/hydromodel/utils/units.py
index 5ec0c59..846aa44 100644
--- a/hydromodel/utils/units.py
+++ b/hydromodel/utils/units.py
@@ -1,19 +1,18 @@
"""
Author: Wenyu Ouyang
Date: 2022-12-08 09:24:54
-LastEditTime: 2023-12-17 20:37:42
+LastEditTime: 2024-03-21 20:08:16
LastEditors: Wenyu Ouyang
Description: some util funcs for hydro model
-FilePath: /hydro-model-xaj/hydromodel/utils/hydro_constant.py
+FilePath: \hydro-model-xaj\hydromodel\utils\units.py
Copyright (c) 2021-2022 Wenyu Ouyang. All rights reserved.
"""
# unify the unit of each variable
# TODO: check which one is correct
unit = {"streamflow": "m3/s"}
-
-
# unit = {"streamflow": "m^3/s"}
+
def convert_unit(data, unit_now, unit_final, **kwargs):
"""
convert unit of variable
diff --git a/scripts/calibrate_xaj.py b/scripts/calibrate_xaj.py
index b0629d8..eb7deb9 100644
--- a/scripts/calibrate_xaj.py
+++ b/scripts/calibrate_xaj.py
@@ -33,12 +33,10 @@ def calibrate(args):
algo_info = args.algorithm
comment = args.comment
data_dir = os.path.join(definitions.ROOT_DIR, "hydromodel", "example", exp)
- data_dir = os.path.join(
- "/home/ldaning/code/biye/hydro-model-xaj/hydromodel/example/model_run_wuxi7"
- )
+ data_dir = os.path.join('D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/模型运行/')
kfold = [
int(f_name[len("data_info_fold") : -len("_test.json")])
- for f_name in os.listdir(data_dir) # 输出文件夹下的所有文件
+ for f_name in os.listdir(data_dir)  # all files under the output directory
if fnmatch.fnmatch(f_name, "*_fold*_test.json")
]
kfold = np.sort(kfold)
@@ -47,11 +45,15 @@ def calibrate(args):
train_data_info_file = os.path.join(
data_dir, f"data_info_fold{str(fold)}_train.json"
)
- train_data_file = os.path.join(data_dir, f"data_info_fold{str(fold)}_train.npy")
+ train_data_file = os.path.join(
+ data_dir, f"data_info_fold{str(fold)}_train.npy"
+ )
test_data_info_file = os.path.join(
data_dir, f"data_info_fold{str(fold)}_test.json"
)
- test_data_file = os.path.join(data_dir, f"data_info_fold{str(fold)}_test.npy")
+ test_data_file = os.path.join(
+ data_dir, f"data_info_fold{str(fold)}_test.npy"
+ )
if (
os.path.exists(train_data_info_file) is False
or os.path.exists(train_data_file) is False
@@ -120,7 +122,7 @@ def calibrate(args):
qsim, _ = xaj( # 计算模拟结果
data_test[:, i : i + 1, 0:2],
params,
- warmup_length=warmup,
+ warmup_length=0,
**model_info,
)
@@ -138,7 +140,7 @@ def calibrate(args):
unit_final=units.unit["streamflow"],
basin_area=basin_area,
)
- test_result_file = os.path.join(
+ test_result_file = os.path.join(
spotpy_db_dir,
"test_qsim_" + model_info["name"] + "_" + str(basin_id) + ".csv",
)
diff --git a/test/cchz_diso.py b/test/cchz_diso.py
new file mode 100644
index 0000000..c712ce7
--- /dev/null
+++ b/test/cchz_diso.py
@@ -0,0 +1,163 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import pandas as pd
+import os
+
+
+
+time = pd.read_excel('D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/站点信息/DMCA.xlsx')
+time['starttime'] = pd.to_datetime(time['starttime'], format='%d/%m/%Y %H:%M')
+time['endtime'] = pd.to_datetime(time['endtime'], format='%d/%m/%Y %H:%M')
+sim = pd.read_excel('D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/站点信息/picture.xlsx')
+sim['date'] = pd.to_datetime(sim['date'], format='%d/%m/%Y %H:%M')
+basin_area= 2814
+assess_xaj_list = []
+assess_dhf_list = []
+W_bias_rela_xaj_list = []
+Q_bias_rela_xaj_list = []
+NSE_xaj_list = []
+W_bias_rela_dhf_list = []
+Q_bias_rela_dhf_list = []
+NSE_dhf_list = []
+W_obs_list = []
+W_sim_xaj_list = []
+W_sim_dhf_list = []
+Q_max_obs_list = []
+Q_max_sim_xaj_list = []
+Q_max_sim_dhf_list = []
+for i in range(len(time)):
+ start_time = time['starttime'][i]
+ end_time = time['endtime'][i]
+ start_num = np.where(sim['date'] == start_time)[0]
+ end_num = np.where(sim['date'] == end_time)[0]
+ # date = pd.date_range(start_time, end_time, freq='H')
+ start_num = int(start_num)
+ end_num = int(end_num)
+ date =sim['date'][start_num:end_num]
+ sim_xaj = sim['sim_xaj'][start_num:end_num]
+ obs = sim['streamflow(m3/s)'][start_num:end_num]
+ sim_dhf = sim['sim_dhf'][start_num:end_num]
+ sim_xaj = np.array(sim_xaj)
+ obs = np.array(obs)
+ sim_dhf = np.array(sim_dhf)
+ numerator = 0
+ denominator = 0
+ meangauge = 0
+ count = 0
+ for h in range(len(obs)):
+ if (obs[h]>=0):
+ numerator+=pow(abs(sim_xaj[h])-obs[h],2)
+ meangauge+=obs[h]
+ count+=1
+ meangauge=meangauge/count
+ for m in range(len(obs)):
+ if (obs[m]>=0):
+ denominator+=pow(obs[m]-meangauge,2)
+ NSE_xaj= 1-numerator/denominator
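+ # flood volume as runoff depth (mm): the sum of hourly flows (m3/s) times 3600 s gives
+ # volume in m3; dividing by the basin area (km2 * 1e6 m2) and multiplying by 1000
+ # converts that depth from m to mm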
+ W_obs=sum(obs)*3600*1000/basin_area/1000000
+ W_sim_xaj = sum(sim_xaj) * 3600 * 1000 /basin_area/ 1000000
+ W_bias_abs_xaj=W_sim_xaj-W_obs
+ W_bias_rela_xaj = abs(W_sim_xaj-W_obs)/W_obs
+ Q_max_obs=np.max(obs)
+ Q_max_sim_xaj=np.max(sim_xaj)
+ Q_bias_rela_xaj = abs(Q_max_sim_xaj-Q_max_obs)/Q_max_obs
+
+ assess_xaj = [W_bias_rela_xaj,Q_bias_rela_xaj,NSE_xaj]
+ numerator = 0
+ denominator = 0
+ meangauge = 0
+ count = 0
+ for h in range(len(obs)):
+ if (obs[h]>=0):
+ numerator+=pow(abs(sim_dhf[h])-obs[h],2)
+ meangauge+=obs[h]
+ count+=1
+ meangauge=meangauge/count
+ for m in range(len(obs)):
+ if (obs[m]>=0):
+ denominator+=pow(obs[m]-meangauge,2)
+ NSE_dhf= 1-numerator/denominator
+ W_sim_dhf = sum(sim_dhf) * 3600 * 1000 /basin_area/ 1000000
+ W_bias_abs_dhf=W_sim_dhf-W_obs
+ W_bias_rela_dhf = abs(W_sim_dhf-W_obs)/W_obs
+ Q_max_obs=np.max(obs)
+ Q_max_sim_dhf=np.max(sim_dhf)
+ Q_bias_rela_dhf = abs(Q_max_sim_dhf-Q_max_obs)/Q_max_obs
+ assess_dhf = [W_bias_rela_dhf,Q_bias_rela_dhf,NSE_dhf]
+ W_bias_rela_xaj_list.append(W_bias_rela_xaj)
+ Q_bias_rela_xaj_list.append(Q_bias_rela_xaj)
+ NSE_xaj_list.append(NSE_xaj)
+ W_bias_rela_dhf_list.append(W_bias_rela_dhf)
+ Q_bias_rela_dhf_list.append(Q_bias_rela_dhf)
+ NSE_dhf_list.append(NSE_dhf)
+ W_obs_list.append (W_obs)
+ W_sim_xaj_list.append(W_sim_xaj)
+ W_sim_dhf_list.append(W_sim_dhf)
+ Q_max_obs_list.append(Q_max_obs)
+ Q_max_sim_xaj_list.append(Q_max_sim_xaj)
+ Q_max_sim_dhf_list.append(Q_max_sim_dhf)
+ assess_obs=[0,0,1]
+ x = np.vstack((assess_obs,assess_xaj, assess_dhf))
+ # find the indices of rows whose last column (NSE) is negative
+ negative_indices = np.where(x[:, -1] < 0)[0]
+
+ # if any row has a negative last column, normalize those rows
+ if negative_indices.size > 0:
+ for idx in negative_indices:
+ x[idx] = (x[idx]-min(x[:, -1]))/(max(x[:, -1])-min(x[:, -1]))
+ dfmax = x
+ diso_dm3 = np.zeros((3, 1))
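+ # each entry of diso_dm3 is the Euclidean distance in (W_bias_rela, Q_bias_rela, NSE)
+ # space between one model's metrics and the observation point (0, 0, 1) defined above;
+ # the smaller the distance, the closer the simulation is to a perfect fit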
+ for i in range(2):
+ dm3 = np.sqrt((dfmax[i+1, 0] - dfmax[0, 0])**2 + (dfmax[i+1, 1] - dfmax[0, 1])**2 + (dfmax[i+1, 2] - dfmax[0, 2])**2)
+ diso_dm3[i, 0] = dm3
+
+ assess_xaj_nor = diso_dm3[0,0]
+ assess_dhf_nor = diso_dm3[1,0]
+ assess_xaj_list.append(assess_xaj_nor)
+ assess_dhf_list.append(assess_dhf_nor)
+
+changci_df = pd.DataFrame({
+ 'start_time':time['starttime'],
+ 'end_time':time['endtime'],
+ 'W_obs':W_obs_list,
+ 'W_sim_xaj':W_sim_xaj_list,
+ 'W_sim_dhf':W_sim_dhf_list,
+ 'Q_max_obs':Q_max_obs_list,
+ 'Q_max_sim_xaj':Q_max_sim_xaj_list,
+ 'Q_max_sim_dhf':Q_max_sim_dhf_list,
+ 'W_bias_rela_xaj':W_bias_rela_xaj_list,
+ 'W_bias_rela_dhf':W_bias_rela_dhf_list,
+ 'Q_bias_rela_xaj':Q_bias_rela_xaj_list,
+ 'Q_bias_rela_dhf':Q_bias_rela_dhf_list,
+ 'NSE_xaj':NSE_xaj_list,
+ 'NSE_dhf':NSE_dhf_list,
+ 'assess_xaj':assess_xaj_list,
+ 'assess_dhf':assess_dhf_list
+ })
+changci_df.to_excel('D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/站点信息/changci.xlsx')
+ # # concatenate the diso matrices
+ # diso = np.concatenate((diso_dm1, diso_dm2, diso_dm3), axis=1)
+
+ # # plot the 2-D diso figure
+ # plt.figure()
+ # plt.scatter(dfmax[:, 0], dfmax[:, 1], marker='o')
+
+ # # add text annotations
+ # plt.text(0.88, 0, 'OBS(1,0)', fontsize=8)
+ # plt.text(0.33, 0.4, 's1(x1,y1)', fontsize=8)
+ # plt.text(0.07, 1, 's2(x2,y2)', fontsize=8)
+ # plt.text(0.47, 0.95, 's3(x3,y3)', fontsize=8)
+
+ # # add line segments
+ # plt.plot([dfmax[0, 0], dfmax[0, 0]], [dfmax[0, 1], dfmax[0, 1]], color='red', linewidth=2.5)
+ # plt.plot([dfmax[0, 0], dfmax[1, 0]], [dfmax[0, 1], dfmax[1, 1]], color='blue', linewidth=2.5)
+ # plt.plot([dfmax[0, 0], dfmax[2, 0]], [dfmax[0, 1], dfmax[2, 1]], color='black', linewidth=2.5)
+
+ # plt.xlabel('CC')
+ # plt.ylabel('NAE')
+ # plt.xlim(0, 1)
+ # plt.ylim(0, 1)
+
+ # save_fig = os.path.join('D:/研究生/毕业论文/new毕业论文/预答辩/英那河/站点信息/CCHZ', "results"+str(i)+".png")
+ # plt.savefig(save_fig, bbox_inches="tight")
+ # plt.close()
\ No newline at end of file
diff --git a/test/picture.py b/test/picture.py
new file mode 100644
index 0000000..5fa0e1a
--- /dev/null
+++ b/test/picture.py
@@ -0,0 +1,116 @@
+from matplotlib import pyplot as plt
+import pandas as pd
+import os
+import numpy as np
+from numpy import *
+import matplotlib.dates as mdates
+import sys
+from pathlib import Path
+sys.path.append(os.path.dirname(Path(os.path.abspath(__file__)).parent.parent))
+# from hydromodel.utils import hydro_constant
+
+time = pd.read_excel('D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/站点信息/DMCA.xlsx')
+time['starttime'] = pd.to_datetime(time['starttime'], format='%d/%m/%Y %H:%M')
+time['endtime'] = pd.to_datetime(time['endtime'], format='%d/%m/%Y %H:%M')
+sim = pd.read_excel('D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/站点信息/picture.xlsx')
+sim['date'] = pd.to_datetime(sim['date'], format='%d/%m/%Y %H:%M')
+for i in range(len(time)):
+ start_time = time['starttime'][i]
+ end_time = time['endtime'][i]
+ start_num = np.where(sim['date'] == start_time)[0]
+ end_num = np.where(sim['date'] == end_time)[0]
+ # date = pd.date_range(start_time, end_time, freq='H')
+ start_num = int(start_num)
+ end_num = int(end_num)
+ date =sim['date'][start_num:end_num]
+ sim_xaj = sim['sim_xaj'][start_num:end_num]
+ sim_dhf = sim['sim_dhf'][start_num:end_num]
+ obs = sim['streamflow(m3/s)'][start_num:end_num]
+ prcp = sim['prcp(mm/hour)'][start_num:end_num]
+ fig = plt.figure(figsize=(9,6),dpi=500)
+ ax = fig.subplots()
+ ax.plot(
+ date,
+ sim_xaj,
+ color="blue",
+ linestyle="-",
+ linewidth=1,
+ label="Simulation_xaj",
+ )
+ ax.plot(
+ date,
+ sim_dhf,
+ color="green",
+ linestyle="-",
+ linewidth=1,
+ label="Simulation_dhf",
+ )
+ ax.plot(
+ date,
+ obs,
+ # "r.",
+ color="black",
+ linestyle="-",
+ linewidth=1,
+ label="Observation",
+ )
+ ylim = np.max(np.vstack((obs, sim_xaj)))
+ print(start_time)
+ ax.set_ylim(0, ylim*1.3)
+ ax.xaxis.set_major_formatter(mdates.DateFormatter("%y-%m-%d"))
+ xlabel="Date(∆t=1hour)"
+ ylabel="Streamflow(m^3/s)"
+ ax.set_xlabel(xlabel)
+ ax.set_ylabel(ylabel)
+ plt.legend(loc="upper right")
+ # sim_xaj = np.array(sim_xaj)
+ # obs = np.array(obs)
+ # numerator = 0
+ # denominator = 0
+ # meangauge = 0
+ # count = 0
+ # for h in range(len(obs)):
+ # if (obs[h]>=0):
+ # numerator+=pow(abs(sim_xaj[h])-obs[h],2)
+ # meangauge+=obs[h]
+ # count+=1
+ # meangauge=meangauge/count
+ # for m in range(len(obs)):
+ # if (obs[m]>=0):
+ # denominator+=pow(obs[m]-meangauge,2)
+ # NSE= 1-numerator/denominator
+ # plt.text(0.9, 0.6, 'NSE=%.2f' % NSE,
+ # horizontalalignment='center',
+ # verticalalignment='center',
+ # transform = ax.transAxes,
+ # fontsize=10)
+
+ ax2 = ax.twinx()
+ ax2.bar(date,prcp, label='Precipitation', color='royalblue',alpha=0.9,width=0.05)
+ ax2.set_ylabel('Precipitation(mm)')
+ plt.yticks(fontproperties = 'Times New Roman', size = 10)
+ prcp_max = np.max(prcp)
+ ax2.set_ylim(0, prcp_max*4)
+ ax2.invert_yaxis()  # reverse the y-axis
+ ax2.legend(loc='upper left')
+ plt.tight_layout()  # automatically adjust subplot parameters to fill the figure area
+ save_fig = os.path.join('D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/站点信息/plot', "results"+str(i)+".png")
+ plt.savefig(save_fig, bbox_inches="tight")
+ plt.close()
+
+
+def NSE(obs,sim_xaj):
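+ # Nash-Sutcliffe efficiency over time steps with non-negative observations:
+ # NSE = 1 - sum((|sim| - obs)^2) / sum((obs - mean(obs))^2),
+ # the same per-event formula used in cchz_diso.py above.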
+ numerator = 0
+ denominator = 0
+ meangauge = 0
+ count = 0
+ for i in range(len(obs)):
+ if (obs[i]>=0):
+ numerator+=pow(abs(sim_xaj[i])-obs[i],2)
+ meangauge+=obs[i]
+ count+=1
+ meangauge=meangauge/count
+ for i in range(len(obs)):
+ if (obs[i]>=0):
+ denominator+=pow(obs[i]-meangauge,2)
+ NSE= 1-numerator/denominator
+ return NSE
diff --git a/test/test_data.py b/test/test_data.py
index 30f90f0..40331a6 100644
--- a/test/test_data.py
+++ b/test/test_data.py
@@ -63,6 +63,7 @@
# def test_save_data(txt_file, json_file, npy_file):
data = pd.read_csv(txt_file)
+datetime_index = pd.to_datetime(data["date"], format="%Y/%m/%d %H:%M")
# Note: The units are all mm/day! For streamflow, data is divided by basin's area
# variables = ["prcp(mm/day)", "petfao56(mm/day)", "streamflow(mm/day)"]
variables = ["prcp(mm/hour)", "pev(mm/hour)", "streamflow(m3/s)"]
diff --git a/xaj/configuration.py b/xaj/configuration.py
new file mode 100644
index 0000000..be41d42
--- /dev/null
+++ b/xaj/configuration.py
@@ -0,0 +1,51 @@
+import yaml
+from hydromodel.data.data_preprocess import split_train_test, cross_valid_data
+from pathlib import Path
+import numpy as np
+from xaj.xajmodel import xaj_state as xaj_state
+
+class configuration():
+
+ def read_config(config_file):
+ with open(config_file) as f:
+ config = yaml.safe_load(f)
+ return config
+
+ def process_inputs(forcing_file, json_file, npy_file, config):
+
+
+ train_period = config['train_period']
+ test_period = config['test_period']
+ period = config['period']
+ json_file = Path(json_file)
+ npy_file = Path(npy_file)
+ split_train_test(json_file, npy_file, train_period, test_period)
+
+ warmup_length = config['warmup_length']
+ cv_fold = config['cv_fold']
+ cross_valid_data(json_file, npy_file, period, warmup_length, cv_fold)
+
+ def extract_forcing(forcing_data):
+ # p_and_e_df = forcing_data[["rainfall[mm]", "TURC [mm d-1]"]]
+ p_and_e_df = forcing_data[["pre", "turc"]]
+ p_and_e= np.expand_dims(p_and_e_df.values, axis=1)
+ return p_and_e_df, p_and_e
+
+ def warmup(p_and_e_warmup,params_state, warmup_length):
+
+ q_sim, es, *w0, w1, w2,s0, fr0, qi0, qg0 = xaj_state(
+ p_and_e_warmup,
+ params_state,
+ warmup_length= warmup_length,
+ source_book="HF",
+ source_type="sources",
+ return_state=True,
+ )
+ return q_sim,es,*w0, w1, w2, s0, fr0, qi0, qg0
+ def get_time_config(config):
+ start_time_str = config['start_time_str']
+ end_time_str = config['end_time_str']
+ time_units = config['time_units']
+ return start_time_str, end_time_str, time_units
+
diff --git a/xaj/constant_unit.py b/xaj/constant_unit.py
new file mode 100644
index 0000000..f09eef7
--- /dev/null
+++ b/xaj/constant_unit.py
@@ -0,0 +1,63 @@
+"""
+Author: Wenyu Ouyang
+Date: 2022-12-08 09:24:54
+LastEditTime: 2022-12-08 09:51:54
+LastEditors: Wenyu Ouyang
+Description: some constant for hydro model
+FilePath: /hydro-model-xaj/xaj/constant_unit.py
+Copyright (c) 2021-2022 Wenyu Ouyang. All rights reserved.
+"""
+# unify the unit of each variable
+unit = {"streamflow": "m3/s"}
+def convert_unit(data, unit_now, unit_final, **kwargs):
+ """
+ convert unit of variable
+
+ Parameters
+ ----------
+ data
+ data to be converted
+ unit_now
+ unit of variable now
+ unit_final
+ unit of variable after conversion
+ **kwargs
+ other parameters required for conversion
+
+ Returns
+ -------
+ data
+ data after conversion
+ """
+ if unit_now == "mm/day" and unit_final == "m3/s":
+ result = mm_per_day_to_m3_per_sec(basin_area=kwargs["basin_area"], q=data)
+ else:
+ raise ValueError("unit conversion not supported")
+ return result
+
+
+def mm_per_day_to_m3_per_sec(basin_area, q):
+ """
+ trans mm/day to m3/s for xaj models
+
+ Parameters
+ ----------
+ basin_area
+ we need to know the area of a basin so that we can perform this transformation
+ q
+ original streamflow data
+
+ Returns
+ -------
+ q_trans
+ streamflow in m3/s
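+ Examples
+ --------
+ A quick sanity check with made-up numbers (not taken from the repo):
+ 1 mm/day over a 100 km2 basin is roughly 1.157 m3/s.
+
+ >>> round(mm_per_day_to_m3_per_sec(basin_area=100.0, q=1.0), 3)
+ 1.157
+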
+ """
+ # 1 ft3 = 0.02831685 m3
+ # ft3tom3 = 2.831685e-2
+ # 1 km2 = 10^6 m2
+ km2tom2 = 1e6
+ # 1 m = 1000 mm
+ mtomm = 1000
+ # 1 day = 24 * 3600 s
+ daytos = 24 * 3600
+ q_trans = q * basin_area * km2tom2 / (mtomm * daytos)
+ return q_trans
diff --git a/xaj/params.py b/xaj/params.py
new file mode 100644
index 0000000..43b654e
--- /dev/null
+++ b/xaj/params.py
@@ -0,0 +1,141 @@
+"""
+Author: Wenyu Ouyang
+Date: 2022-10-25 21:16:22
+LastEditTime: 2022-12-25 16:06:05
+LastEditors: Wenyu Ouyang
+Description: some basic config for hydro-model-xaj models
+FilePath: \hydro-model-xaj\xaj\params.py
+Copyright (c) 2021-2022 Wenyu Ouyang. All rights reserved.
+"""
+from collections import OrderedDict
+
+# NOTE: Don't change the parameter settings
+
+MODEL_PARAM_DICT = {
+ "xaj": {
+ "param_name": [
+ # Allen, R.G., L. Pereira, D. Raes, and M. Smith, 1998.
+ # Crop Evapotranspiration, Food and Agriculture Organization of the United Nations,
+ # Rome, Italy. FAO publication 56. ISBN 92-5-104219-5. 290p.
+ "K", # ratio of potential evapotranspiration to reference crop evaporation generally from Allen, 1998
+ "B", # The exponent of the tension water capacity curve
+ "IM", # The ratio of the impervious to the total area of the basin
+ "UM", # Tension water capacity in the upper layer
+ "LM", # Tension water capacity in the lower layer
+ "DM", # Tension water capacity in the deepest layer
+ "C", # The coefficient of deep evapotranspiration
+ "SM", # The areal mean of the free water capacity of surface soil layer
+ "EX", # The exponent of the free water capacity curve
+ "KI", # Outflow coefficients of interflow
+ "KG", # Outflow coefficients of groundwater
+ "CS", # The recession constant of channel system
+ "L", # Lag time
+ "CI", # The recession constant of the lower interflow
+ "CG", # The recession constant of groundwater storage
+ ],
+ # "param_range": OrderedDict(
+ # {
+ # "K": [0.1, 1.0],
+ # "B": [0.1, 0.4],
+ # "IM": [0.01, 0.1],
+ # "UM": [0.0, 20.0],
+ # "LM": [60.0, 90.0],
+ # "DM": [60.0, 120.0],
+ # "C": [0.0, 0.2],
+ # "SM": [1, 100.0],
+ # "EX": [1.0, 1.5],
+ # "KI": [0.0, 0.7],
+ # "KG": [0.0, 0.7],
+ # "CS": [0.0, 1.0],
+ # "L": [1.0, 10.0], # unit is day
+ # "CI": [0.0, 0.9],
+ # "CG": [0.98, 0.998],
+ # }
+ # ),
+ "param_range": OrderedDict(
+ {
+ "K": [0.1, 0.1],
+ "B": [0.1, 0.1],
+ "IM": [0.1, 0.1],
+ "UM": [10, 10.0],
+ "LM": [70.0, 70.0],
+ "DM": [80.0, 80.0],
+ "C": [0.1, 0.1],
+ "SM": [50, 50],
+ "EX": [1.2, 1.2],
+ "KI": [0.3, 0.3],
+ "KG": [0.3, 0.3],
+ "CS": [0.5, 0.5],
+ "L": [5.0, 5.0], # unit is day
+ "CI": [0.5, 0.5],
+ "CG": [0.98, 0.98],
+ }
+ ),
+ },
+ "xaj_mz": {
+ "param_name": [
+ # Allen, R.G., L. Pereira, D. Raes, and M. Smith, 1998.
+ # Crop Evapotranspiration, Food and Agriculture Organization of the United Nations,
+ # Rome, Italy. FAO publication 56. ISBN 92-5-104219-5. 290p.
+ "K", # ratio of potential evapotranspiration to reference crop evaporation generally from Allen, 1998
+ "B", # The exponent of the tension water capacity curve
+ "IM", # The ratio of the impervious to the total area of the basin
+ "UM", # Tension water capacity in the upper layer
+ "LM", # Tension water capacity in the lower layer
+ "DM", # Tension water capacity in the deepest layer
+ "C", # The coefficient of deep evapotranspiration
+ "SM", # The areal mean of the free water capacity of surface soil layer
+ "EX", # The exponent of the free water capacity curve
+ "KI", # Outflow coefficients of interflow
+ "KG", # Outflow coefficients of groundwater
+ "A", # parameter of mizuRoute
+ "THETA", # parameter of mizuRoute
+ "CI", # The recession constant of the lower interflow
+ "CG", # The recession constant of groundwater storage
+ "KERNEL", # kernel size of mizuRoute unit hydrograph when using convolution method
+ ],
+ "param_range": OrderedDict(
+ {
+ "K": [0.1, 1.0],
+ "B": [0.1, 0.4],
+ "IM": [0.01, 0.1],
+ "UM": [0.0, 20.0],
+ "LM": [60.0, 90.0],
+ "DM": [60.0, 120.0],
+ "C": [0.0, 0.2],
+ "SM": [1.0, 100.0],
+ "EX": [1.0, 1.5],
+ "KI": [0.0, 0.7],
+ "KG": [0.0, 0.7],
+ "A": [0.0, 2.9],
+ "THETA": [0.0, 6.5],
+ "CI": [0.0, 0.9],
+ "CG": [0.98, 0.998],
+ "KERNEL": [1, 15],
+ }
+ ),
+ },
+ "gr4j": {
+ "param_name": ["x1", "x2", "x3", "x4"],
+ "param_range": OrderedDict(
+ {
+ "x1": [100.0, 1200.0],
+ "x2": [-5.0, 3.0],
+ "x3": [20.0, 300.0],
+ "x4": [1.1, 2.9],
+ }
+ ),
+ },
+ "hymod": {
+ "param_name": ["cmax", "bexp", "alpha", "ks", "kq"],
+ "param_range": OrderedDict(
+ {
+ "cmax": [1.0, 500.0],
+ "bexp": [0.1, 2.0],
+ "alpha": [0.1, 0.99],
+ "ks": [0.001, 0.10],
+ "kq": [0.1, 0.99],
+ }
+ ),
+ },
+}
diff --git a/xaj/runxaj.yaml b/xaj/runxaj.yaml
new file mode 100644
index 0000000..dba9186
--- /dev/null
+++ b/xaj/runxaj.yaml
@@ -0,0 +1,21 @@
+# xaj model configuration
+# The active settings below run the model on an hourly time step; the commented lines show a daily setup.
+ # start_time_str: "1990-01-01"
+ # end_time_str: "2009-12-31"
+
+ # time_units: "days"
+ basin_area: "66400"
+ start_time_str: "2021-06-01 00:00:00"
+ end_time_str: "2021-08-31 23:00:00"
+ time_units: "hours"
+# initial condition
+ # forcing_file: "/home/wangjingyi/code/hydro-model-xaj/hydromodel/example/hymod_input.csv"
+ forcing_file: "/home/wangjingyi/code/hydro-model-xaj/scripts/尼尔基降雨.csv"
+ warmup_length: 360
+# json_file: "/home/wangjingyi/code/hydro-model-xaj/hydromodel/example/01013500_lump_p_pe_q.json"
+# npy_file: "/home/wangjingyi/code/hydro-model-xaj/hydromodel/example/01013500_lump_p_pe_q.npy"
+
+# train_period: ["1990-01-01", "2000-12-31"]
+# test_period: ["2001-01-01", "2009-12-31"]
+# period: ["1990-01-01", "2009-12-31"]
+# cv_fold: 2
diff --git a/xaj/test-xaj-bmi.py b/xaj/test-xaj-bmi.py
new file mode 100644
index 0000000..b35e994
--- /dev/null
+++ b/xaj/test-xaj-bmi.py
@@ -0,0 +1,43 @@
+import logging
+logging.basicConfig(level=logging.INFO)
+from xaj_bmi import xajBmi
+import pandas as pd
+# from test.test_xaj import test_xaj
+# from configuration import configuration
+# import numpy as np
+model = xajBmi()
+print(model.get_component_name())
+
+
+model.initialize("xaj/runxaj.yaml")
+print("Start time:", model.get_start_time())
+print("End time:", model.get_end_time())
+print("Current time:", model.get_current_time())
+print("Time step:", model.get_time_step())
+print("Time units:", model.get_time_units())
+print(model.get_input_var_names())
+print(model.get_output_var_names())
+
+discharge = []
+ET = []
+time = []
+while model.get_current_time() <= model.get_end_time():
+ time.append(model.get_current_time())
+ model.update()
+
+discharge=model.get_value("discharge")
+ET=model.get_value("ET")
+
+results = pd.DataFrame({
+ 'discharge': discharge.flatten(),
+ 'ET': ET.flatten(),
+ })
+results.to_csv('/home/wangjingyi/code/hydro-model-xaj/scripts/xaj.csv')
+model.finalize()
+# params=np.tile([0.5], (1, 15))
+# config = configuration.read_config("scripts/runxaj.yaml")
+# forcing_data = pd.read_csv(config['forcing_file'])
+# p_and_e_df, p_and_e = configuration.extract_forcing(forcing_data)
+# test_xaj(p_and_e=p_and_e,params=params,warmup_length=360)
+
+
diff --git a/xaj/xaj_bmi.py b/xaj/xaj_bmi.py
new file mode 100644
index 0000000..2ee3fa9
--- /dev/null
+++ b/xaj/xaj_bmi.py
@@ -0,0 +1,274 @@
+from typing import Tuple
+from bmipy import Bmi
+import numpy as np
+
+from xaj.xajmodel import xaj_route, xaj_runoff
+from xaj.constant_unit import convert_unit,unit
+
+from grpc4bmi.constants import GRPC_MAX_MESSAGE_LENGTH
+import datetime
+import pandas as pd
+import logging
+from xaj.configuration import configuration
+
+logger = logging.getLogger(__name__)
+
+PRECISION = 1e-5
+class xajBmi(Bmi):
+ """Empty model wrapped in a BMI interface."""
+ name = "hydro-model-xaj"
+ input_var_names = ("precipitation","ETp")
+ output_var_names = ("ET","discharge")
+ var_units = {"precipitation": "mm/day", "ETp": "mm/day", "discharge": "mm/day", "ET": "mm/day"}
+
+
+ def __init__(self):
+ """Create a BmiHeat model that is ready for initialization."""
+ self.time_step = 0
+
+ def initialize(self, config_file):
+ try:
+ logger.info("xaj: initialize_model")
+ config = configuration.read_config(config_file)
+ forcing_data = pd.read_csv(config['forcing_file'])
+ p_and_e_df, p_and_e = configuration.extract_forcing(forcing_data)
+ p_and_e_warmup = p_and_e[0:config['warmup_length'],:,:]
+ params=np.tile([0.5], (1, 15))
+ self.q_sim_state,self.es_state,self.w0, self.w1,self.w2,self.s0, self.fr0, self.qi0, self.qg0 = configuration.warmup(p_and_e_warmup,params,config['warmup_length'])
+
+ self._start_time_str,self._end_time_str, self._time_units = configuration.get_time_config(config)
+
+ self.params = params
+ self.warmup_length = config['warmup_length']
+ self.p_and_e_df = p_and_e_df
+ self.p_and_e = p_and_e
+ self.config = config
+ self.basin_area = config['basin_area']
+
+ except:
+ import traceback
+ traceback.print_exc()
+ raise
+
+
+ def update(self):
+ """Update model for a single time step."""
+
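+ # Note: the forcing slice below grows with every call, so runoff generation is
+ # re-run from the start of the series up to the current step; the routing step
+ # (xaj_route) only runs once the current step (offset by the warm-up length)
+ # reaches the end of the forcing series.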
+ self.time_step += 1
+ # p_and_e_sim = self.p_and_e[self.warmup_length+1:self.time_step+self.warmup_length+1]
+ p_and_e_sim = self.p_and_e[1:self.time_step+1]
+ self.runoff_im, self.rss_,self.ris_, self.rgs_, self.es_runoff, self.rss= xaj_runoff(p_and_e_sim,
+ w0=self.w0, s0=self.s0, fr0=self.fr0,
+ params_runoff=self.params,
+ return_state=False,
+ )
+ if self.time_step+self.warmup_length+1 >= self.p_and_e.shape[0]:
+ q_sim,es = xaj_route(p_and_e_sim,
+ params_route=self.params,
+ model_name = "xaj",
+ runoff_im=self.runoff_im,
+ rss_=self.rss_,
+ ris_=self.ris_,
+ rgs_=self.rgs_,
+ rss=self.rss,
+ qi0=self.qi0,
+ qg0=self.qg0,
+ es=self.es_runoff,
+ )
+ self.p_sim = p_and_e_sim[:,:,0]
+ self.e_sim = p_and_e_sim[:,:,1]
+ q_sim = convert_unit(
+ q_sim,
+ unit_now="mm/day",
+ unit_final=unit["streamflow"],
+ basin_area=float(self.basin_area),
+ )
+ self.q_sim = q_sim
+ self.es = es
+
+ def update_until(self, time):
+ while self.get_current_time() + 0.001 < time:
+ self.update()
+
+ def finalize(self) -> None:
+ """Finalize model."""
+ self.model = None
+
+ def get_component_name(self) -> str:
+ return "xaj"
+
+ def get_input_item_count(self) -> int:
+ return len(self.input_var_names)
+
+ def get_output_item_count(self) -> int:
+ return len(self.output_var_names)
+
+ def get_input_var_names(self) -> Tuple[str]:
+ return self.input_var_names
+
+ def get_output_var_names(self) -> Tuple[str]:
+ return self.output_var_names
+
+ def get_var_grid(self, name: str) -> int:
+ raise NotImplementedError()
+
+ def get_var_type(self, name: str) -> str:
+ return 'float64'
+
+ def get_var_units(self, name: str) -> str:
+ return self.var_units[name]
+
+ def get_var_itemsize(self, name: str) -> int:
+ return np.dtype(self.get_var_type(name)).itemsize
+
+ def get_var_nbytes(self, name: str) -> int:
+ return self.get_value_ptr(name).nbytes
+
+ def get_var_location(self, name: str) -> str:
+ raise NotImplementedError()
+
+ def get_start_time(self):
+ return self.start_Time(self._start_time_str)
+
+ def get_current_time(self):
+ # return self.start_Time(self._start_time_str) + datetime.timedelta(self.time_step+self.warmup_length)
+ if self._time_units == 'hours':
+ time_step = datetime.timedelta(hours=self.time_step)
+ elif self._time_units == 'days':
+ time_step = datetime.timedelta(days=self.time_step)
+ return self.start_Time(self._start_time_str)+ time_step
+
+ def get_end_time(self):
+ return self.end_Time(self._end_time_str)
+
+ def get_time_units(self) -> str:
+ return self._time_units
+
+ def get_time_step(self) -> float:
+ return 1
+
+ def get_value(self, name: str) -> np.ndarray:
+ logger.info("getting value for var %s", name)
+ return self.get_value_ptr(name).flatten()
+
+ def get_value_ptr(self, name: str) -> np.ndarray:
+ if name == 'discharge':
+ return self.q_sim
+ elif name == 'ET':
+ return self.es
+
+ def get_value_at_indices(
+ self, name: str, inds: np.ndarray
+ ) -> np.ndarray:
+
+ return self.get_value_ptr(name).take(inds)
+
+
+ def set_value(self, name: str, src: np.ndarray):
+
+ val = self.get_value_ptr(name)
+ val[:] = src.reshape(val.shape)
+
+
+ def set_value_at_indices(
+ self, name: str, inds: np.ndarray, src: np.ndarray
+ ) -> None:
+ val = self.get_value_ptr(name)
+ val.flat[inds] = src
+
+ # Grid information
+ def get_grid_rank(self, grid: int) -> int:
+ raise NotImplementedError()
+
+ def get_grid_size(self, grid: int) -> int:
+ raise NotImplementedError()
+
+ def get_grid_type(self, grid: int) -> str:
+ raise NotImplementedError()
+
+ # Uniform rectilinear
+ def get_grid_shape(self, grid: int, shape: np.ndarray) -> np.ndarray:
+ raise NotImplementedError()
+
+ def get_grid_spacing(self, grid: int, spacing: np.ndarray) -> np.ndarray:
+ raise NotImplementedError()
+
+ def get_grid_origin(self, grid: int, origin: np.ndarray) -> np.ndarray:
+ raise NotImplementedError()
+
+ # Non-uniform rectilinear, curvilinear
+ def get_grid_x(self, grid: int, x: np.ndarray) -> np.ndarray:
+ raise NotImplementedError()
+
+ def get_grid_y(self, grid: int, y: np.ndarray) -> np.ndarray:
+ raise NotImplementedError()
+
+ def get_grid_z(self, grid: int, z: np.ndarray) -> np.ndarray:
+ raise NotImplementedError()
+
+ def get_grid_node_count(self, grid: int) -> int:
+ raise NotImplementedError()
+
+ def get_grid_edge_count(self, grid: int) -> int:
+ raise NotImplementedError()
+
+ def get_grid_face_count(self, grid: int) -> int:
+ raise NotImplementedError()
+
+ def get_grid_edge_nodes(self, grid: int, edge_nodes: np.ndarray) -> np.ndarray:
+ raise NotImplementedError()
+
+ def get_grid_face_edges(self, grid: int, face_edges: np.ndarray) -> np.ndarray:
+ raise NotImplementedError()
+
+ def get_grid_face_nodes(self, grid: int, face_nodes: np.ndarray) -> np.ndarray:
+ raise NotImplementedError()
+
+ def get_grid_nodes_per_face(
+ self, grid: int, nodes_per_face: np.ndarray
+ ) -> np.ndarray:
+ raise NotImplementedError()
+
+
+ def start_Time(self, _start_time_str):
+
+ try:
+ if " " in _start_time_str:
+ date, time = _start_time_str.split(" ")
+ else:
+ date = _start_time_str
+ time = None
+ year, month, day = date.split("-")
+ self._startTime = datetime.date(int(year), int(month), int(day))
+
+ if time:
+ hour, minute, second = time.split(":")
+ # self._startTime = self._startTime.replace(hour=int(hour),
+ # minute=int(minute),
+ # second=int(second))
+ self._startTime = datetime.datetime(int(year), int(month), int(day),int(hour),int(minute), int(second))
+ except ValueError:
+ raise ValueError("Invalid start date format!")
+
+ return self._startTime
+
+ def end_Time(self, _end_time_str):
+
+ try:
+ if " " in _end_time_str:
+ date, time = _end_time_str.split(" ")
+ else:
+ date = _end_time_str
+ time = None
+ year, month, day = date.split("-")
+ self._endTime = datetime.date(int(year), int(month), int(day))
+
+ if time:
+ hour, minute, second = time.split(":")
+ self._endTime = datetime.datetime(int(year), int(month), int(day),int(hour),int(minute), int(second))
+ except ValueError:
+ raise ValueError("Invalid start date format!")
+ return self._endTime
+
+
+
diff --git a/xaj/xajmodel.py b/xaj/xajmodel.py
new file mode 100644
index 0000000..2027955
--- /dev/null
+++ b/xaj/xajmodel.py
@@ -0,0 +1,1060 @@
+"""
+Core code for XinAnJiang model
+"""
+import logging
+from typing import Union
+import numpy as np
+from numba import jit
+from scipy.special import gamma
+
+from xaj.params import MODEL_PARAM_DICT
+
+PRECISION = 1e-5
+
+
+@jit
+def calculate_evap(lm, c, wu0, wl0, prcp, pet) -> tuple[np.array, np.array, np.array]:
+ """
+ Three-layers evaporation model from "Watershed Hydrologic Simulation" written by Prof. RenJun Zhao.
+ The book is Chinese, and its name is 《流域水文模拟》;
+ The three-layers evaporation model is described in Page 76;
+ The method is same with that in Page 22-23 in "Hydrologic Forecasting (5-th version)" written by Prof. Weimin Bao.
+ This book's Chinese name is 《水文预报》
+
+ Parameters
+ ----------
+ lm
+ average soil moisture storage capacity of lower layer
+ c
+ coefficient of deep layer
+ wu0
+ initial soil moisture of upper layer; update in each time step
+ wl0
+ initial soil moisture of lower layer; update in each time step
+ prcp
+ basin mean precipitation
+ pet
+ potential evapotranspiration
+
+ Returns
+ -------
+ tuple[np.array,np.array,np.array]
+ eu/el/ed are evaporation from upper/lower/deeper layer, respectively
+ """
+ eu = np.where(wu0 + prcp >= pet, pet, wu0 + prcp)
+ ed = np.where((wl0 < c * lm) & (wl0 < c * (pet - eu)), c * (pet - eu) - wl0, 0.0)
+ el = np.where(
+ wu0 + prcp >= pet,
+ 0.0,
+ np.where(
+ wl0 >= c * lm,
+ (pet - eu) * wl0 / lm,
+ np.where(wl0 >= c * (pet - eu), c * (pet - eu), wl0),
+ ),
+ )
+ return eu, el, ed
+
+
+@jit
+def calculate_prcp_runoff(b, im, wm, w0, pe) -> tuple[np.array, np.array]:
+ """
+ Calculates the amount of runoff generated from rainfall after entering the underlying surface.
+
+ Same in "Watershed Hydrologic Simulation" and "Hydrologic Forecasting (5-th version)"
+
+ Parameters
+ ----------
+ b
+ B; exponent coefficient
+ im
+ IMP; imperiousness coefficient
+ wm
+ average soil moisture storage capacity
+ w0
+ initial soil moisture
+ pe
+ net precipitation
+
+ Returns
+ -------
+ tuple[np.array,np.array]
+ r -- runoff; r_im -- runoff of impervious part
+ """
+ wmm = wm * (1.0 + b)
+ a = wmm * (1.0 - (1.0 - w0 / wm) ** (1.0 / (1.0 + b)))
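+ # wmm is the maximum point capacity of the tension water capacity curve, and a is the
+ # ordinate on that curve corresponding to the current basin-average storage w0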
+ if np.isnan(a).any():
+ raise ArithmeticError("Please check if w0>wm or b is a negative value!")
+ r_cal = np.where(
+ pe > 0.0,
+ np.where(
+ pe + a < wmm,
+ # 1e-5 is a precision which we set to guarantee float's calculation is correct
+ pe - (wm - w0) + wm * (1.0 - np.minimum(a + pe, wmm) / wmm) ** (1.0 + b),
+ pe - (wm - w0),
+ ),
+ np.full(pe.shape, 0.0),
+ )
+ r = np.maximum(r_cal, 0.0)
+ # separate impervious part with the other
+ r_im_cal = pe * im
+ r_im = np.maximum(r_im_cal, 0.0)
+ return r, r_im
+
+
+def calculate_w_storage(
+ um, lm, dm, wu0, wl0, wd0, eu, el, ed, pe, r
+) -> tuple[np.array, np.array, np.array]:
+ """
+ Update the soil moisture values of the three layers.
+
+ According to the equation 2.60 in the book《水文预报》
+
+ Parameters
+ ----------
+ um
+ average soil moisture storage capacity of the upper layer
+ lm
+ average soil moisture storage capacity of the lower layer
+ dm
+ average soil moisture storage capacity of the deep layer
+ wu0
+ initial values of soil moisture in upper layer
+ wl0
+ initial values of soil moisture in lower layer
+ wd0
+ initial values of soil moisture in deep layer
+ eu
+ evaporation of the upper layer; it isn't used in this function
+ el
+ evaporation of the lower layer
+ ed
+ evaporation of the deep layer
+ pe
+ net precipitation; it is able to be negative value in this function
+ r
+ runoff
+
+ Returns
+ -------
+ tuple[np.array,np.array,np.array]
+ wu,wl,wd -- soil moisture in upper, lower and deep layer
+ """
+ # pe>0: the upper soil moisture was added firstly, then lower layer, and the final is deep layer
+ # pe<=0: no additional water, just remove evapotranspiration,
+ # but note the case: e >= p > 0
+ # (1) if wu0 + p > e, then e = eu (2) else, wu must be zero
+ wu = np.where(
+ pe > 0.0,
+ np.where(wu0 + pe - r < um, wu0 + pe - r, um),
+ np.where(wu0 + pe > 0.0, wu0 + pe, 0.0),
+ )
+ # calculate wd before wl because it is easier to cal using where statement
+ wd = np.where(
+ pe > 0.0,
+ np.where(wu0 + wl0 + pe - r > um + lm, wu0 + wl0 + wd0 + pe - r - um - lm, wd0),
+ wd0 - ed,
+ )
+ # water balance (equation 2.2 in Page 13, also shown in Page 23)
+ # if wu0 + p > e, then e = eu; else p must be used in upper layer,
+ # so no matter what the case is, el didn't include p, neither ed
+ wl = np.where(pe > 0.0, wu0 + wl0 + wd0 + pe - r - wu - wd, wl0 - el)
+ # the water storage should be in reasonable range
+ wu_ = np.clip(wu, a_min=0.0, a_max=um)
+ wl_ = np.clip(wl, a_min=0.0, a_max=lm)
+ wd_ = np.clip(wd, a_min=0.0, a_max=dm)
+ return wu_, wl_, wd_
+
+
+def generation(p_and_e, k, b, im, um, lm, dm, c, wu0=None, wl0=None, wd0=None) -> tuple:
+ """
+ Single-step runoff generation in XAJ.
+
+ Parameters
+ ----------
+ p_and_e
+ precipitation and potential evapotranspiration
+ k
+ ratio of potential evapotranspiration to reference crop evaporation
+ b
+ exponent parameter
+ um
+ average soil moisture storage capacity of the upper layer
+ lm
+ average soil moisture storage capacity of the lower layer
+ dm
+ average soil moisture storage capacity of the deep layer
+ im
+ impermeability coefficient
+ c
+ coefficient of deep layer
+ wu0
+ initial values of soil moisture in upper layer
+ wl0
+ initial values of soil moisture in lower layer
+ wd0
+ initial values of soil moisture in deep layer
+
+ Returns
+ -------
+ tuple[tuple, tuple]
+ (r, rim, e, pe), (wu, wl, wd); all variables are np.array
+ """
+ # make sure physical variables' value ranges are correct
+ prcp = np.maximum(p_and_e[:, 0], 0.0)
+ # get potential evapotranspiration
+ pet = np.maximum(p_and_e[:, 1] * k, 0.0)
+ # wm
+ wm = um + lm + dm
+ if wu0 is None:
+ # just an initial value
+ wu0 = 0.6 * um
+ if wl0 is None:
+ wl0 = 0.6 * lm
+ if wd0 is None:
+ wd0 = 0.6 * dm
+ w0_ = wu0 + wl0 + wd0
+
+ # w0 need locate in correct range so that following calculation could be right
+ # To make sure float data's calculation is correct, we'd better minus a precision (1e-5)
+ w0 = np.minimum(w0_, wm - 1e-5)
+
+ # Calculate the amount of evaporation from storage
+ eu, el, ed = calculate_evap(lm, c, wu0, wl0, prcp, pet)
+ e = eu + el + ed
+
+ # Calculate the runoff generated by net precipitation
+ prcp_difference = prcp - e
+ pe = np.maximum(prcp_difference, 0.0)
+ r, rim = calculate_prcp_runoff(b, im, wm, w0, pe)
+ # Update wu, wl, wd
+ wu, wl, wd = calculate_w_storage(
+ um, lm, dm, wu0, wl0, wd0, eu, el, ed, prcp_difference, r
+ )
+
+ return (r, rim, e, pe), (wu, wl, wd)
+
+
+def sources(pe, r, sm, ex, ki, kg, s0=None, fr0=None, book="HF") -> tuple:
+ """
+ Divide the runoff to different sources
+
+ We use the initial version from the paper of the inventor of the XAJ model -- Prof. Renjun Zhao:
+ "Analysis of parameters of the XinAnJiang model". Its Chinese name is <<新安江模型参数的分析>>,
+ which could be found by searching in "Baidu Xueshu".
+ The module's code can also be found in "Watershed Hydrologic Simulation" (WHS) Page 174.
+ It is nearly same with that in "Hydrologic Forecasting" (HF) Page 148-149
+ We use the period average runoff as input and the unit period is day so we don't need to difference it as books show
+
+ We also provide code for formula from《水文预报》"Hydrologic Forecasting" (HF) the fifth version. Page 40-41 and 150-151;
+ the procedures in 《工程水文学》"Engineering Hydrology" (EH) the third version are different we also provide.
+ they are in the "sources5mm" function.
+
+ Parameters
+ ------------
+ pe
+ net precipitation
+ r
+ runoff from xaj_generation
+ sm
+ areal mean free water capacity of the surface layer
+ ex
+ exponent of the free water capacity curve
+ ki
+ outflow coefficients of the free water storage to interflow relationships
+ kg
+ outflow coefficients of the free water storage to groundwater relationships
+ s0
+ free water capacity of last period
+ fr0
+ runoff area of last period
+
+ Return
+ ------------
+ tuple[tuple, tuple]
+ rs -- surface runoff; ri-- interflow runoff; rg -- groundwater runoff;
+ s1 -- final free water capacity;
+ all variables are numpy array
+
+ """
+ # maximum free water storage capacity in a basin
+ ms = sm * (1.0 + ex)
+ if fr0 is None:
+ fr0 = 0.1
+ if s0 is None:
+ s0 = 0.5 * sm
+ # For free water storage, because s is related to fr and s0 and fr0 are both values of last period,
+ # we have to trans the initial value of s from last period to this one.
+ # both WHS(流域水文模拟)'s sample code and HF(水文预报) use s = fr0 * s0 / fr.
+ # I think they both think free water reservoir as a cubic tank. Its height is s and area of bottom rectangle is fr
+ # we will have a cubic tank with varying bottom and height,
+ # and fixed boundary (in HF sm is fixed) or none-fixed boundary (in EH smmf is not fixed)
+ # but notice r's list like" [1,0] which 1 is the 1st period's runoff and 0 is the 2nd period's runoff
+ # after 1st period, the s1 could not be zero, but in the 2nd period, fr=0, then we cannot set s=0, because some water still in the tank
+ # fr's formula could be found in Eq. 9 in "Analysis of parameters of the XinAnJiang model",
+ # Here our r doesn't include rim, so there is no need to remove rim from r; this is also the method in 《水文预报》(HF)
+
+ # NOTE: when r is 0, fr should be 0, however, s1 may not be zero and it still hold some water,
+ # then fr can not be 0, otherwise when fr is used as denominator it lead to error,
+ # so we have to deal with this case later, for example, when r=0, we cannot use pe * fr to replace r
+ # because fr get the value of last period, and it is not 0
+ fr = np.copy(fr0)
+ if any(fr == 0.0):
+ raise ArithmeticError(
+ "Please check fr's value, fr==0.0 will cause error in the next step!"
+ )
+ # r=0, then r/pe must be 0
+ fr_mask = r > 0.0
+ fr[fr_mask] = r[fr_mask] / pe[fr_mask]
+ if np.isnan(fr).any():
+ raise ArithmeticError("Please check pe's data! there may be 0.0")
+
+ # if fr=0, then we cannot get ss, but ss should not be 0, because s1 of last period may not be 0 and it still hold some water
+ ss = np.copy(s0)
+ # same initialization for s
+ s = np.copy(s0)
+
+ ss[fr_mask] = fr0[fr_mask] * s0[fr_mask] / fr[fr_mask]
+
+ if book == "HF":
+ ss = np.minimum(ss, sm)
+ au = ms * (1.0 - (1.0 - ss / sm) ** (1.0 / (1.0 + ex)))
+ if np.isnan(au).any():
+ raise ValueError(
+ "Error: NaN values detected. Try set clip function or check your data!!!"
+ )
+ # the first condition should be r > 0.0, when r=0, rs must be 0, fig 6-12 in EH or fig 5-4 in HF
+ # so we have to use "pe" carefully!!! when r>0.0, we use pe, otherwise we don't use it!!!
+ rs = np.full(r.shape, 0.0)
+ rs[fr_mask] = np.where(
+ pe[fr_mask] + au[fr_mask] < ms[fr_mask],
+ # equation 2-85 in HF
+ fr[fr_mask]
+ * (
+ pe[fr_mask]
+ - sm[fr_mask]
+ + ss[fr_mask]
+ + sm[fr_mask]
+ * (
+ (
+ 1
+ - np.minimum(pe[fr_mask] + au[fr_mask], ms[fr_mask])
+ / ms[fr_mask]
+ )
+ ** (1 + ex[fr_mask])
+ )
+ ),
+ # equation 2-86 in HF
+ fr[fr_mask] * (pe[fr_mask] + ss[fr_mask] - sm[fr_mask]),
+ )
+ rs = np.minimum(rs, r)
+
+ # ri's mask is not same as rs's, because last period's s may not be 0
+ # and in this time, ri and rg could be larger than 0
+ # we need firstly calculate the updated s, s's mask is same as fr_mask,
+ # when r==0, then s will be equal to last period's
+ # equation 2-87 in HF, some free water leave or save, so we update free water storage
+ s[fr_mask] = ss[fr_mask] + (r[fr_mask] - rs[fr_mask]) / fr[fr_mask]
+ s = np.minimum(s, sm)
+ if np.isnan(s).any():
+ raise ArithmeticError("Please check fr's data! there may be 0.0")
+
+ elif book == "EH":
+ # smmf should be with correpond with s
+ smmf = ms * (1 - (1 - fr) ** (1 / ex))
+ smf = smmf / (1 + ex)
+ ss = np.minimum(ss, smf)
+ au = smmf * (1 - (1 - ss / smf) ** (1 / (1 + ex)))
+ if np.isnan(au).any():
+ raise ValueError(
+ "Error: NaN values detected. Try set clip function or check your data!!!"
+ )
+
+ # rs's mask is keep with fr_mask
+ rs = np.full(r.shape, 0.0)
+ rs[fr_mask] = np.where(
+ pe[fr_mask] + au[fr_mask] < smmf[fr_mask],
+ (
+ pe[fr_mask]
+ - smf[fr_mask]
+ + ss[fr_mask]
+ + smf[fr_mask]
+ * (
+ 1
+ - np.minimum(pe[fr_mask] + au[fr_mask], smmf[fr_mask])
+ / smmf[fr_mask]
+ )
+ ** (ex[fr_mask] + 1)
+ )
+ * fr[fr_mask],
+ (pe[fr_mask] + ss[fr_mask] - smf[fr_mask]) * fr[fr_mask],
+ )
+ rs = np.minimum(rs, r)
+ s[fr_mask] = ss[fr_mask] + (r[fr_mask] - rs[fr_mask]) / fr[fr_mask]
+ s = np.minimum(s, smf)
+ else:
+ raise ValueError("Please set book as 'HF' or 'EH'!")
+ # the following part is same for both HF and EH. Even the formula is different, but their meaning is same
+ # equation 2-88 in HF, next interflow and ground water will be released from the updated free water storage
+ # We use the period average runoff as input and the general unit period is day.
+ # Hence, we directly use ki and kg rather than ki_{Δt} in books.
+ ri = ki * s * fr
+ rg = kg * s * fr
+ # ri = np.where(s < PRECISION, np.full(r.shape, 0.0), ki * s * fr)
+ # rg = np.where(s < PRECISION, np.full(r.shape, 0.0), kg * s * fr)
+ # equation 2-89 in HF; although it looks different with that in WHS, they are actually same
+ # Finally, calculate the final free water storage
+ s1 = s * (1 - ki - kg)
+ # s1 = np.where(s1 < PRECISION, np.full(s1.shape, 0.0), s1)
+ return (rs, ri, rg), (s1, fr)
+
+
+def sources5mm(
+ pe,
+ runoff,
+ sm,
+ ex,
+ ki,
+ kg,
+ s0=None,
+ fr0=None,
+ time_interval_hours=24,
+ book="HF",
+):
+ """
+ Divide the runoff to different sources according to books -- 《水文预报》HF 5th edition and 《工程水文学》EH 3rd edition
+
+ Parameters
+ ----------
+ pe
+ net precipitation
+ runoff
+ runoff from xaj_generation
+ sm
+ areal mean free water capacity of the surface layer
+ ex
+ exponent of the free water capacity curve
+ ki
+ outflow coefficients of the free water storage to interflow relationships
+ kg
+ outflow coefficients of the free water storage to groundwater relationships
+ s0
+ initial free water capacity
+ fr0
+ initial area of generation
+ time_interval_hours
+ Ki, Kg, Ci and Cg are defined for a 24-hour period, so they must be converted according to the actual time-step length
+ book
+ the methods in 《水文预报》HF 5th edition and 《工程水文学》EH 3rd edition are different,
+ hence, both are provided, and the default is the former -- "ShuiWenYuBao";
+ the other one is "GongChengShuiWenXue"
+
+ Returns
+ -------
+ tuple[tuple, tuple]
+ rs_s -- surface runoff; rss_s-- interflow runoff; rg_s -- groundwater runoff;
+ (fr_ds[-1], s_ds[-1]): state variables' final value;
+ all variables are numpy array
+ """
+ # Ki and Kg are defined for a 24-hour period, so convert them to the current time-step length
+ hours_per_day = 24
+ # if 24 hours is not evenly divisible by the time step, add one more period
+ residue_temp = hours_per_day % time_interval_hours
+ if residue_temp != 0:
+ residue_temp = 1
+ period_num_1d = int(hours_per_day / time_interval_hours) + residue_temp
+ # when kss+kg>1 and the root has an even index, kss_period becomes a complex number and an error is raised;
+ # also note the denominator may be 0, so kss must not be 0
+ # therefore the value of kss+kg is constrained
+ kss_period = (1 - (1 - (ki + kg)) ** (1 / period_num_1d)) / (1 + kg / ki)
+ kg_period = kss_period * kg / ki
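+ # the two lines above split the daily outflow coefficients into per-period values:
+ # the per-period retention factor (1 - kss_period - kg_period) compounds over
+ # period_num_1d periods back to the daily value (1 - ki - kg), and the kg:ki ratio is kept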
+
+ # maximum point free water storage capacity depth in the basin
+ smm = sm * (1 + ex)
+ if s0 is None:
+ s0 = 0.50 * sm
+ if fr0 is None:
+ fr0 = 0.1
+ # don't use np.where here, because it will cause some warning
+ fr = np.copy(fr0)
+ fr_mask = runoff > 0.0
+ fr[fr_mask] = runoff[fr_mask] / pe[fr_mask]
+ if np.all(runoff < 5):
+ n = 1
+ else:
+ # when modeling multiple basins, the number of divides is not the same, so we use the maximum number
+ r_max = np.max(runoff)
+ residue_temp = r_max % 5
+ if residue_temp != 0:
+ residue_temp = 1
+ n = int(r_max / 5) + residue_temp
+ rn = runoff / n
+ pen = pe / n
+ kss_d = (1 - (1 - (kss_period + kg_period)) ** (1 / n)) / (
+ 1 + kg_period / kss_period
+ )
+ kg_d = kss_d * kg_period / kss_period
+ # kss_d = kss_period
+ # kg_d = kg_period
+ rs = np.full(runoff.shape, 0.0)
+ rss = np.full(runoff.shape, 0.0)
+ rg = np.full(runoff.shape, 0.0)
+
+ s_ds = []
+ fr_ds = []
+ s_ds.append(s0)
+ fr_ds.append(fr0)
+
+ for j in range(n):
+ fr0_d = fr_ds[j]
+ s0_d = s_ds[j]
+ # equation 5-32 in HF, but strange, cause each period, rn/pen is same, fr_d should be same
+ # fr_d = 1 - (1 - fr) ** (1 / n)
+ fr_d = fr
+
+ ss_d = np.copy(s0_d)
+ s_d = np.copy(s0_d)
+ s1_d = np.copy(s0_d)
+
+ ss_d[fr_mask] = fr0_d[fr_mask] * s0_d[fr_mask] / fr_d[fr_mask]
+
+ if book == "HF":
+ ss_d = np.minimum(ss_d, sm)
+ # ms = smm
+ au = smm * (1.0 - (1.0 - ss_d / sm) ** (1.0 / (1.0 + ex)))
+ if np.isnan(au).any():
+ raise ValueError(
+ "Error: NaN values detected. Try set clip function or check your data!!!"
+ )
+ rs_j = np.full(rn.shape, 0.0)
+ rs_j[fr_mask] = np.where(
+ pen[fr_mask] + au[fr_mask] < smm[fr_mask],
+ # equation 5-26 in HF
+ fr_d[fr_mask]
+ * (
+ pen[fr_mask]
+ - sm[fr_mask]
+ + ss_d[fr_mask]
+ + sm[fr_mask]
+ * (
+ (
+ 1
+ - np.minimum(pen[fr_mask] + au[fr_mask], smm[fr_mask])
+ / smm[fr_mask]
+ )
+ ** (1 + ex[fr_mask])
+ )
+ ),
+ # equation 5-27 in HF
+ fr_d[fr_mask] * (pen[fr_mask] + ss_d[fr_mask] - sm[fr_mask]),
+ )
+ rs_j = np.minimum(rs_j, rn)
+ s_d[fr_mask] = ss_d[fr_mask] + (rn[fr_mask] - rs_j[fr_mask]) / fr_d[fr_mask]
+ s_d = np.minimum(s_d, sm)
+
+ elif book == "EH":
+ smmf = smm * (1 - (1 - fr_d) ** (1 / ex))
+ smf = smmf / (1 + ex)
+ ss_d = np.minimum(ss_d, smf)
+ au = smmf * (1 - (1 - ss_d / smf) ** (1 / (1 + ex)))
+ if np.isnan(au).any():
+ raise ValueError(
+ "Error: NaN values detected. Try set clip function or check your data!!!"
+ )
+ rs_j = np.full(rn.shape, 0.0)
+ rs_j[fr_mask] = np.where(
+ pen[fr_mask] + au[fr_mask] < smmf[fr_mask],
+ (
+ pen[fr_mask]
+ - smf[fr_mask]
+ + ss_d[fr_mask]
+ + smf[fr_mask]
+ * (
+ 1
+ - np.minimum(pen[fr_mask] + au[fr_mask], smmf[fr_mask])
+ / smmf[fr_mask]
+ )
+ ** (ex[fr_mask] + 1)
+ )
+ * fr_d[fr_mask],
+ (pen[fr_mask] + ss_d[fr_mask] - smf[fr_mask]) * fr_d[fr_mask],
+ )
+ rs_j = np.minimum(rs_j, rn)
+ s_d[fr_mask] = ss_d[fr_mask] + (rn[fr_mask] - rs_j[fr_mask]) / fr_d[fr_mask]
+ s_d = np.minimum(s_d, smf)
+
+ else:
+ raise NotImplementedError(
+ "We don't have this implementation! Please chose 'HF' or 'EH'!!"
+ )
+ rss_j = s_d * kss_d * fr_d
+ rg_j = s_d * kg_d * fr_d
+ s1_d = s_d * (1 - kss_d - kg_d)
+
+ rs = rs + rs_j
+ rss = rss + rss_j
+ rg = rg + rg_j
+ # store s_d and fr_d as initial values for the next sub-period
+ s_ds.append(s1_d)
+ fr_ds.append(fr_d)
+
+ return (rs, rss, rg), (s_ds[-1], fr_ds[-1])
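+
+
+# A minimal call sketch for sources5mm (shapes and values below are assumptions for
+# illustration, not taken from the rest of this module); all arguments are 1-d arrays
+# over basins, and s0/fr0 should also be arrays when passed explicitly:
+# >>> import numpy as np
+# >>> pe = np.array([15.0, 5.0])        # net precipitation per basin
+# >>> runoff = np.array([12.0, 3.0])    # runoff from the generation step
+# >>> sm = np.array([20.0, 20.0]); ex = np.array([1.2, 1.2])
+# >>> ki = np.array([0.3, 0.3]); kg = np.array([0.2, 0.2])
+# >>> s0 = 0.5 * sm; fr0 = np.array([0.1, 0.1])
+# >>> (rs, rss, rg), (s1, fr1) = sources5mm(pe, runoff, sm, ex, ki, kg, s0=s0, fr0=fr0, book="HF")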
+
+
+@jit
+def linear_reservoir(x, weight, last_y=None) -> np.array:
+ """
+ Linear reservoir's release function
+
+ Parameters
+ ----------
+ x
+ the input to the linear reservoir
+ weight
+ the coefficient of linear reservoir
+ last_y
+ the output of last period
+
+ Returns
+ -------
+ np.array
+ one-step forward result
+ """
+ weight1 = 1 - weight
+ if last_y is None:
+ last_y = np.full(weight.shape, 0.001)
+ y = weight * last_y + weight1 * x
+ return y
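+
+
+# Worked example for linear_reservoir (assumed values, for illustration only): with
+# weight = 0.9 and the default last_y = 0.001, an input x = 1.2 gives
+# y = 0.9 * 0.001 + (1 - 0.9) * 1.2 = 0.1209,
+# i.e. the release is a weighted blend of the previous outflow and the current inflow.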
+
+
+def uh_conv(x, uh_from_gamma):
+ """
+ Function for 1d-convolution calculation
+
+ Parameters
+ ----------
+ x
+ x is a sequence-first variable; the dim of x is [seq, batch, feature=1];
+ feature must be 1
+ uh_from_gamma
+ unit hydrograph from uh_gamma; the dim: [len_uh, batch, feature=1];
+ feature must be 1
+
+ Returns
+ -------
+ np.array
+ convolution
+ """
+ outputs = np.full(x.shape, 0.0)
+ time_length, batch_size, feature_size = x.shape
+ if feature_size > 1:
+ logging.error("We only support one-dim convolution now!!!")
+ for i in range(batch_size):
+ uh = uh_from_gamma[:, i, 0]
+ inputs = x[:, i, 0]
+ outputs[:, i, 0] = np.convolve(inputs, uh)[:time_length]
+ return outputs
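+
+
+# Shape sketch for uh_conv (sizes are assumptions, for illustration only): with
+# x of shape [100, 2, 1] and uh_from_gamma of shape [15, 2, 1], each basin's series
+# is convolved with its own unit hydrograph and truncated back to 100 steps, so the
+# output keeps the shape of x, [100, 2, 1].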
+
+
+def uh_gamma(a, theta, len_uh=15):
+ """
+ A simple two-parameter Gamma distribution as a unit-hydrograph to route instantaneous runoff from a hydrologic model
+ The method comes from mizuRoute -- http://www.geosci-model-dev.net/9/2223/2016/
+
+ Parameters
+ ----------
+ a
+ shape parameter
+ theta
+ timescale parameter
+ len_uh
+ the time length of the unit hydrograph
+
+ Returns
+ -------
+ np.array
+ the unit hydrograph, dim: [seq, batch, feature]
+ """
+ # dims of a: time_seq (same for all time steps), batch, feature=1
+ m = a.shape
+ if len_uh > m[0]:
+ raise RuntimeError(
+ "length of unit hydrograph should be smaller than the whole length of input"
+ )
+ # aa > 0; here we set a minimum of 0.1 (the minimum of a is 0, set when calling this func); the first dimension of a is repeated
+ aa = np.maximum(0.0, a[0:len_uh, :, :]) + 0.1
+ # theta > 0, here set minimum 0.5
+ theta = np.maximum(0.0, theta[0:len_uh, :, :]) + 0.5
+ # len_f, batch, feature
+ t = np.expand_dims(
+ np.swapaxes(np.tile(np.arange(0.5, len_uh * 1.0), (m[1], 1)), 0, 1), axis=-1
+ )
+ denominator = gamma(aa) * (theta**aa)
+ # [len_f, m[1], m[2]]
+ w = 1 / denominator * (t ** (aa - 1)) * (np.exp(-t / theta))
+ w = w / w.sum(0) # scale to 1 for each UH
+ return w
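+
+
+# A minimal usage sketch for uh_gamma (shapes and values are assumptions, for
+# illustration only):
+# >>> import numpy as np
+# >>> a = np.full((20, 2, 1), 2.0)        # shape parameter, repeated along time
+# >>> theta = np.full((20, 2, 1), 1.5)    # timescale parameter
+# >>> uh = uh_gamma(a, theta, len_uh=15)  # -> shape (15, 2, 1)
+# >>> np.allclose(uh.sum(axis=0), 1.0)    # each basin's unit hydrograph sums to 1
+# True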
+
+
+def xaj_runoff(
+ p_and_e,
+ params_runoff: Union[np.array, list],
+ *w0, s0, fr0,
+ **kwargs,
+) -> Union[tuple, np.array]:
+ model_name = kwargs["name"] if "name" in kwargs else "xaj"
+ source_type = kwargs["source_type"] if "source_type" in kwargs else "sources"
+ source_book = kwargs["source_book"] if "source_book" in kwargs else "HF"
+ # params
+ param_ranges = MODEL_PARAM_DICT[model_name]["param_range"]
+ if model_name == "xaj":
+ route_method = "CSL"
+ elif model_name == "xaj_mz":
+ route_method = "MZ"
+ else:
+ raise NotImplementedError(
+ "We don't provide this route method now! Please use 'CS' or 'MZ'!"
+ )
+ xaj_params = [
+ (value[1] - value[0]) * params_runoff[:, i] + value[0]
+ for i, (key, value) in enumerate(param_ranges.items())
+ ]
+ k = xaj_params[0]
+ b = xaj_params[1]
+ im = xaj_params[2]
+ um = xaj_params[3]
+ lm = xaj_params[4]
+ dm = xaj_params[5]
+ c = xaj_params[6]
+ sm = xaj_params[7]
+ ex = xaj_params[8]
+ ki_ = xaj_params[9]
+ kg_ = xaj_params[10]
+ # ki+kg should be smaller than 1; if not, we scale them
+ ki = np.where(ki_ + kg_ < 1.0, ki_, (1.0 - PRECISION) / (ki_ + kg_) * ki_)
+ kg = np.where(ki_ + kg_ < 1.0, kg_, (1.0 - PRECISION) / (ki_ + kg_) * kg_)
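+ # e.g. (hypothetical values): ki_ = 0.7 and kg_ = 0.5 sum to 1.2 > 1, so both are
+ # multiplied by (1 - PRECISION) / 1.2 and the rescaled ki + kg equals 1 - PRECISION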
+
+
+ # w0 = (0.5 * um, 0.5 * lm, 0.5 * dm)
+ inputs = p_and_e[:, :, :]
+ runoff_ims_ = np.full(inputs.shape[:2], 0.0)
+ rss_ = np.full(inputs.shape[:2], 0.0)
+ ris_ = np.full(inputs.shape[:2], 0.0)
+ rgs_ = np.full(inputs.shape[:2], 0.0)
+ es_ = np.full(inputs.shape[:2], 0.0)
+ for i in range(inputs.shape[0]):
+ if i == 0:
+ (r, rim, e, pe), w = generation(
+ inputs[i, :, :], k, b, im, um, lm, dm, c, *w0
+ )
+ if source_type == "sources":
+ (rs, ri, rg), (s, fr) = sources(
+ pe, r, sm, ex, ki, kg, s0, fr0, book=source_book
+ )
+ elif source_type == "sources5mm":
+ (rs, ri, rg), (s, fr) = sources5mm(
+ pe, r, sm, ex, ki, kg, s0, fr0, book=source_book
+ )
+ else:
+ raise NotImplementedError("No such divide-sources method")
+ else:
+ (r, rim, e, pe), w = generation(
+ inputs[i, :, :], k, b, im, um, lm, dm, c, *w
+ )
+ if source_type == "sources":
+ (rs, ri, rg), (s, fr) = sources(
+ pe, r, sm, ex, ki, kg, s, fr, book=source_book
+ )
+ elif source_type == "sources5mm":
+ (rs, ri, rg), (s, fr) = sources5mm(
+ pe, r, sm, ex, ki, kg, s, fr, book=source_book
+ )
+ else:
+ raise NotImplementedError("No such divide-sources method")
+ # impervious part is pe * im
+ runoff_ims_[i, :] = rim
+ # so for the non-impervious part, the result should be corrected
+ rss_[i, :] = rs * (1 - im)
+ ris_[i, :] = ri * (1 - im)
+ rgs_[i, :] = rg * (1 - im)
+ es_[i, :] = e
+ # seq, batch, feature
+ runoff_im = np.expand_dims(runoff_ims_, axis=2)
+ rss = np.expand_dims(rss_, axis=2)
+ es = np.expand_dims(es_, axis=2)
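+ # returned in order: impervious-area runoff (3-d), surface / interflow / groundwater
+ # runoff over the pervious area (2-d each), evaporation (3-d), and the surface runoff
+ # again as a 3-d array for the MZ routing branch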
+ return runoff_im, rss_, ris_, rgs_, es, rss
+
+def xaj_route(
+ p_and_e,
+ params_route: Union[np.array, list],
+ ris_,
+ rgs_,
+ rss_,
+ rss,
+ runoff_im,
+ qi0,
+ qg0,
+ es,
+ **kwargs,
+) -> Union[tuple, np.array]:
+
+ model_name = kwargs["name"] if "name" in kwargs else "xaj"
+ source_type = kwargs["source_type"] if "source_type" in kwargs else "sources"
+ source_book = kwargs["source_book"] if "source_book" in kwargs else "HF"
+ # params
+ param_ranges = MODEL_PARAM_DICT[model_name]["param_range"]
+ if model_name == "xaj":
+ route_method = "CSL"
+ elif model_name == "xaj_mz":
+ route_method = "MZ"
+ else:
+ raise NotImplementedError(
+ "We don't provide this route method now! Please use 'CS' or 'MZ'!"
+ )
+ xaj_params = [
+ (value[1] - value[0]) * params_route[:, i] + value[0]
+ for i, (key, value) in enumerate(param_ranges.items())
+ ]
+ k = xaj_params[0]
+ b = xaj_params[1]
+ im = xaj_params[2]
+ um = xaj_params[3]
+ lm = xaj_params[4]
+ dm = xaj_params[5]
+ c = xaj_params[6]
+ sm = xaj_params[7]
+ ex = xaj_params[8]
+ ki_ = xaj_params[9]
+ kg_ = xaj_params[10]
+ # ki+kg should be smaller than 1; if not, we scale them
+ ki = np.where(ki_ + kg_ < 1.0, ki_, (1.0 - PRECISION) / (ki_ + kg_) * ki_)
+ kg = np.where(ki_ + kg_ < 1.0, kg_, (1.0 - PRECISION) / (ki_ + kg_) * kg_)
+ if route_method == "CSL":
+ cs = xaj_params[11]
+ l = xaj_params[12]
+
+ elif route_method == "MZ":
+ # we will use routing method from mizuRoute -- http://www.geosci-model-dev.net/9/2223/2016/
+ a = xaj_params[11]
+ theta = xaj_params[12]
+ # the kernel size is also treated as a parameter
+ kernel_size = int(xaj_params[15])
+ else:
+ raise NotImplementedError(
+ "We don't provide this route method now! Please use 'CS' or 'MZ'!"
+ )
+ ci = xaj_params[13]
+ cg = xaj_params[14]
+
+ inputs = p_and_e[:, :, :]
+ qs = np.full(inputs.shape[:2], 0.0)
+ if route_method == "CSL":
+ qt = np.full(inputs.shape[:2], 0.0)
+ for i in range(inputs.shape[0]):
+ if i == 0:
+ qi = linear_reservoir(ris_[i], ci, qi0)
+ qg = linear_reservoir(rgs_[i], cg, qg0)
+ else:
+ qi = linear_reservoir(ris_[i], ci, qi)
+ qg = linear_reservoir(rgs_[i], cg, qg)
+ qs_ = rss_[i]
+ qt[i, :] = qs_ + qi + qg
+
+ for j in range(len(l)):
+ lag = int(l[j])
+ for i in range(lag):
+ qs[i, j] = qt[i, j]
+ for i in range(lag, inputs.shape[0]):
+ qs[i, j] = cs[j] * qs[i - 1, j] + (1 - cs[j]) * qt[i - lag, j]
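+ # lag-and-route channel step: qs[t] = cs * qs[t-1] + (1 - cs) * qt[t - lag], i.e. the
+ # total sub-basin inflow qt is delayed by `lag` periods and then attenuated through a
+ # linear reservoir with recession coefficient cs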
+
+ elif route_method == "MZ":
+ # tile the per-basin routing parameters along the time axis so each basin keeps its own value
+ rout_a = np.tile(a, (rss.shape[0], 1)).reshape(rss.shape)
+ rout_b = np.tile(theta, (rss.shape[0], 1)).reshape(rss.shape)
+ conv_uh = uh_gamma(rout_a, rout_b, kernel_size)
+ qs_ = uh_conv(runoff_im + rss, conv_uh)
+ for i in range(inputs.shape[0]):
+ if i == 0:
+ qi = linear_reservoir(ris_[i], ci, qi0)
+ qg = linear_reservoir(rgs_[i], cg, qg0)
+ else:
+ qi = linear_reservoir(ris_[i], ci, qi)
+ qg = linear_reservoir(rgs_[i], cg, qg)
+ qs[i, :] = qs_[i, :, 0] + qi + qg
+ else:
+ raise NotImplementedError(
+ "We don't provide this route method now! Please use 'CSL' or 'MZ'!"
+ )
+
+ # seq, batch, feature
+ q_sim = np.expand_dims(qs, axis=2)
+ return q_sim, es
+
+def xaj_state(
+ p_and_e,
+ params: Union[np.array, list],
+ return_state=True,
+ warmup_length=365,
+ **kwargs,
+) -> Union[tuple, np.array]:
+ # default values for some function parameters
+ model_name = kwargs["name"] if "name" in kwargs else "xaj"
+ source_type = kwargs["source_type"] if "source_type" in kwargs else "sources"
+ source_book = kwargs["source_book"] if "source_book" in kwargs else "HF"
+ # params
+ param_ranges = MODEL_PARAM_DICT[model_name]["param_range"]
+ if model_name == "xaj":
+ route_method = "CSL"
+ elif model_name == "xaj_mz":
+ route_method = "MZ"
+ else:
+ raise NotImplementedError(
+ "We don't provide this route method now! Please use 'CS' or 'MZ'!"
+ )
+ xaj_params = [
+ (value[1] - value[0]) * params[:, i] + value[0]
+ for i, (key, value) in enumerate(param_ranges.items())
+ ]
+ k = xaj_params[0]
+ b = xaj_params[1]
+ im = xaj_params[2]
+ um = xaj_params[3]
+ lm = xaj_params[4]
+ dm = xaj_params[5]
+ c = xaj_params[6]
+ sm = xaj_params[7]
+ ex = xaj_params[8]
+ ki_ = xaj_params[9]
+ kg_ = xaj_params[10]
+ # ki+kg should be smaller than 1; if not, we scale them
+ ki = np.where(ki_ + kg_ < 1.0, ki_, (1.0 - PRECISION) / (ki_ + kg_) * ki_)
+ kg = np.where(ki_ + kg_ < 1.0, kg_, (1.0 - PRECISION) / (ki_ + kg_) * kg_)
+ if route_method == "CSL":
+ cs = xaj_params[11]
+ l = xaj_params[12]
+
+ elif route_method == "MZ":
+ # we will use routing method from mizuRoute -- http://www.geosci-model-dev.net/9/2223/2016/
+ a = xaj_params[11]
+ theta = xaj_params[12]
+ # the kernel size is also treated as a parameter
+ kernel_size = int(xaj_params[15])
+ else:
+ raise NotImplementedError(
+ "We don't provide this route method now! Please use 'CS' or 'MZ'!"
+ )
+ ci = xaj_params[13]
+ cg = xaj_params[14]
+
+
+ # initialize state values
+ # if warmup_length > 0:
+ # p_and_e_warmup = p_and_e[0:warmup_length, :, :]
+ # _q, _e, *w0, s0, fr0, qi0, qg0 = xaj_state(
+ # p_and_e_warmup,
+ # params,
+ # return_state=True,
+ # warmup_length=0,
+ # **kwargs,
+ # )
+ # else:
+ w0 = (0.5 * um, 0.5 * lm, 0.5 * dm)
+ s0 = 0.5 * sm
+ fr0 = np.full(ex.shape, 0.1)
+ qi0 = np.full(ci.shape, 0.1)
+ qg0 = np.full(cg.shape, 0.1)
+
+ # state variables
+ # inputs = p_and_e[warmup_length:, :, :]
+ inputs = p_and_e[:, :, :]
+ runoff_ims_ = np.full(inputs.shape[:2], 0.0)
+ rss_ = np.full(inputs.shape[:2], 0.0)
+ ris_ = np.full(inputs.shape[:2], 0.0)
+ rgs_ = np.full(inputs.shape[:2], 0.0)
+ es_ = np.full(inputs.shape[:2], 0.0)
+ for i in range(inputs.shape[0]):
+ if i == 0:
+ (r, rim, e, pe), w = generation(
+ inputs[i, :, :], k, b, im, um, lm, dm, c, *w0
+ )
+ if source_type == "sources":
+ (rs, ri, rg), (s, fr) = sources(
+ pe, r, sm, ex, ki, kg, s0, fr0, book=source_book
+ )
+ elif source_type == "sources5mm":
+ (rs, ri, rg), (s, fr) = sources5mm(
+ pe, r, sm, ex, ki, kg, s0, fr0, book=source_book
+ )
+ else:
+ raise NotImplementedError("No such divide-sources method")
+ else:
+ (r, rim, e, pe), w = generation(
+ inputs[i, :, :], k, b, im, um, lm, dm, c, *w
+ )
+ if source_type == "sources":
+ (rs, ri, rg), (s, fr) = sources(
+ pe, r, sm, ex, ki, kg, s, fr, book=source_book
+ )
+ elif source_type == "sources5mm":
+ (rs, ri, rg), (s, fr) = sources5mm(
+ pe, r, sm, ex, ki, kg, s, fr, book=source_book
+ )
+ else:
+ raise NotImplementedError("No such divide-sources method")
+ # impervious part is pe * im
+ runoff_ims_[i, :] = rim
+ # so for the non-impervious part, the result should be corrected
+ rss_[i, :] = rs * (1 - im)
+ ris_[i, :] = ri * (1 - im)
+ rgs_[i, :] = rg * (1 - im)
+ es_[i, :] = e
+ # seq, batch, feature
+ runoff_im = np.expand_dims(runoff_ims_, axis=2)
+ rss = np.expand_dims(rss_, axis=2)
+ es = np.expand_dims(es_, axis=2)
+
+ qs = np.full(inputs.shape[:2], 0.0)
+
+ if route_method == "CSL":
+ qt = np.full(inputs.shape[:2], 0.0)
+ for i in range(inputs.shape[0]):
+ if i == 0:
+ qi = linear_reservoir(ris_[i], ci, qi0)
+ qg = linear_reservoir(rgs_[i], cg, qg0)
+ else:
+ qi = linear_reservoir(ris_[i], ci, qi)
+ qg = linear_reservoir(rgs_[i], cg, qg)
+ qs_ = rss_[i]
+ qt[i, :] = qs_ + qi + qg
+
+ for j in range(len(l)):
+ lag = int(l[j])
+ for i in range(lag):
+ qs[i, j] = qt[i, j]
+ for i in range(lag, inputs.shape[0]):
+ qs[i, j] = cs[j] * qs[i - 1, j] + (1 - cs[j]) * qt[i - lag, j]
+
+ elif route_method == "MZ":
+ # tile the per-basin routing parameters along the time axis so each basin keeps its own value
+ rout_a = np.tile(a, (rss.shape[0], 1)).reshape(rss.shape)
+ rout_b = np.tile(theta, (rss.shape[0], 1)).reshape(rss.shape)
+ conv_uh = uh_gamma(rout_a, rout_b, kernel_size)
+ qs_ = uh_conv(runoff_im + rss, conv_uh)
+ for i in range(inputs.shape[0]):
+ if i == 0:
+ qi = linear_reservoir(ris_[i], ci, qi0)
+ qg = linear_reservoir(rgs_[i], cg, qg0)
+ else:
+ qi = linear_reservoir(ris_[i], ci, qi)
+ qg = linear_reservoir(rgs_[i], cg, qg)
+ qs[i, :] = qs_[i, :, 0] + qi + qg
+ else:
+ raise NotImplementedError(
+ "We don't provide this route method now! Please use 'CSL' or 'MZ'!"
+ )
+
+ # seq, batch, feature
+ q_sim = np.expand_dims(qs, axis=2)
+ if return_state:
+ return q_sim, es, *w, s, fr, qi, qg