Merge branch 'wangjingyi1999-event' into dev
OuyangWenyu committed Mar 21, 2024
2 parents e439362 + b3ef01c commit 9d51465
Showing 18 changed files with 2,417 additions and 99 deletions.
Empty file added --algorithm
Empty file.
236 changes: 236 additions & 0 deletions hydromodel/app/show_results.ipynb
@@ -0,0 +1,236 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 分析结果\n",
"\n",
"这是一个用于分析模型率定后测试结果的 Jupyter Notebook。读取各个exp下面的结果文件看看。"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Please Check your directory:\n",
"ROOT_DIR of the repo: d:\\code\\hydro-model-xaj\n",
"DATASET_DIR of the repo: d:\\data\n"
]
}
],
"source": [
"import os\n",
"import sys\n",
"from pathlib import Path\n",
"\n",
"sys.path.append(os.path.dirname(Path(os.path.abspath('')).parent))\n",
"import definitions"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"metric_mean_file = Path(os.path.join(\"D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/模型运行/basins_test_metrics_mean_all_cases.csv\"))\n",
"metric_median_file = Path(os.path.join(\"D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/模型运行/basins_test_metrics_median_all_cases.csv\"))"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"metric_mean = pd.read_csv(metric_mean_file, index_col=0)\n",
"metric_median = pd.read_csv(metric_median_file, index_col=0)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Bias</th>\n",
" <th>RMSE</th>\n",
" <th>ubRMSE</th>\n",
" <th>Corr</th>\n",
" <th>R2</th>\n",
" <th>NSE</th>\n",
" <th>KGE</th>\n",
" <th>FHV</th>\n",
" <th>FLV</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>HFsources</th>\n",
" <td>-10.431724</td>\n",
" <td>84.941974</td>\n",
" <td>84.29898</td>\n",
" <td>0.625164</td>\n",
" <td>0.380791</td>\n",
" <td>0.380791</td>\n",
" <td>0.308037</td>\n",
" <td>-30.076788</td>\n",
" <td>-100.0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Bias RMSE ubRMSE Corr R2 NSE \\\n",
"HFsources -10.431724 84.941974 84.29898 0.625164 0.380791 0.380791 \n",
"\n",
" KGE FHV FLV \n",
"HFsources 0.308037 -30.076788 -100.0 "
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"metric_mean"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Bias</th>\n",
" <th>RMSE</th>\n",
" <th>ubRMSE</th>\n",
" <th>Corr</th>\n",
" <th>R2</th>\n",
" <th>NSE</th>\n",
" <th>KGE</th>\n",
" <th>FHV</th>\n",
" <th>FLV</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>HFsources</th>\n",
" <td>-10.431724</td>\n",
" <td>84.941974</td>\n",
" <td>84.29898</td>\n",
" <td>0.625164</td>\n",
" <td>0.380791</td>\n",
" <td>0.380791</td>\n",
" <td>0.308037</td>\n",
" <td>-30.076788</td>\n",
" <td>-100.0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Bias RMSE ubRMSE Corr R2 NSE \\\n",
"HFsources -10.431724 84.941974 84.29898 0.625164 0.380791 0.380791 \n",
"\n",
" KGE FHV FLV \n",
"HFsources 0.308037 -30.076788 -100.0 "
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"metric_median"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.7 ('xaj')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "5ff2b0240d3185dc85fb5f0a6365eefe977ea7c2afca8c64838dc9fcf4f02a96"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
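The notebook above imports matplotlib but the cells shown only print the metric tables. A minimal bar chart of the mean metrics could be added as one more cell, roughly as sketched below; it reuses the metric_mean_file path and column headers from the cells above, and the figure styling is illustrative only.

import pandas as pd
import matplotlib.pyplot as plt

# same file as read in the cells above
metric_mean = pd.read_csv(metric_mean_file, index_col=0)
# plot the basin-mean metrics for the single HFsources case as a bar chart
ax = metric_mean.loc["HFsources", ["Bias", "RMSE", "ubRMSE", "Corr", "R2", "NSE", "KGE"]].plot.bar()
ax.set_ylabel("metric value")
plt.tight_layout()
plt.show()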
74 changes: 37 additions & 37 deletions hydromodel/calibrate/calibrate_sceua.py
@@ -18,7 +18,7 @@ def __init__(
warmup_length=365,
model={
"name": "xaj_mz",
"source_type": "sources",
"source_type": "sources5mm",
"source_book": "HF",
},
obj_func=None,
@@ -128,53 +128,53 @@ def objectivefunction(
float
likelihood
"""
# slice the series into flood events
# time = pd.read_excel('/home/ldaning/code/biye/hydro-model-xaj/hydromodel/example/zhandian/洪水率定时间1.xlsx')
# calibrate_starttime = pd.to_datetime("2014-7-10 0:00")
# calibrate_endtime = pd.to_datetime("2020-6-24 0:00")
# total = 0
# count = 0
# for i in range(len(time)):
# if(time.iloc[i,0]<calibrate_endtime):
# start_num = (time.iloc[i,0]-calibrate_starttime-pd.Timedelta(hours=365))/pd.Timedelta(hours=1)
# end_num = (time.iloc[i,1]-calibrate_starttime-pd.Timedelta(hours=365))/pd.Timedelta(hours=1)
# start_num = int(start_num)
# end_num = int(end_num)
# if not self.obj_func:
# # This is used if not overwritten by user
# like_ = rmse(evaluation[start_num:end_num,], simulation[start_num:end_num,])
# total += like_
# count += 1

# else:
# Way to ensure flexible spot setup class
# like_ = self.obj_func(evaluation[start_num:end_num,], simulation[start_num:end_num,])
# total += like_
# count += 1

# like=total/count
# return like
if not self.obj_func:
    # This is used if not overwritten by user
    like = rmse(evaluation, simulation)
else:
    # Way to ensure flexible spot setup class
    like = self.obj_func(evaluation, simulation)
# slice the series into flood events and average the event-wise error
time = pd.read_excel('D:/研究生/毕业论文/new毕业论文/预答辩/碧流河水库/站点信息/洪水率定时间.xlsx')
calibrate_starttime = pd.to_datetime("2012-06-10 0:00:00")
calibrate_endtime = pd.to_datetime("2019-12-31 23:00:00")
total = 0
count = 0
for i in range(len(time)):
    if time.iloc[i, 0] < calibrate_endtime:
        start_num = (time.iloc[i, 0] - calibrate_starttime - pd.Timedelta(hours=365)) / pd.Timedelta(hours=1)
        end_num = (time.iloc[i, 1] - calibrate_starttime - pd.Timedelta(hours=365)) / pd.Timedelta(hours=1)
        start_num = int(start_num)
        end_num = int(end_num)
        if not self.obj_func:
            like_ = rmse(evaluation[start_num:end_num,], simulation[start_num:end_num,])
            total += like_
            count += 1
        else:
            # Way to ensure flexible spot setup class
            like_ = self.obj_func(evaluation[start_num:end_num,], simulation[start_num:end_num,])
            total += like_
            count += 1
like = total / count
return like
# if not self.obj_func:
# # This is used if not overwritten by user
# like= rmse(evaluation, simulation)


# else:
# # Way to ensure flexible spot setup class
# like= self.obj_func(evaluation, simulation)
# return like

# SPOTPY expects to get one or multiple values back,
# that define the performance of the model run


def calibrate_by_sceua(
p_and_e,
qobs,
dbname,
warmup_length=365,
model={
"name": "xaj_mz", # 模型
"source_type": "sources",
"name": "xaj_mz", #模型
"source_type": "sources5mm",
"source_book": "HF",
},
algorithm={
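For readers of this diff, the event-based objective introduced above can be summarized as the standalone sketch below. It assumes hourly evaluation/simulation series that already exclude the 365-hour warmup, an Excel table whose first two columns hold each flood event's start and end times, and SPOTPY's rmse helper; the function name event_mean_rmse and its parameters are illustrative and not part of the committed code.

import pandas as pd
from spotpy.objectivefunctions import rmse

def event_mean_rmse(evaluation, simulation, event_file, calibrate_starttime, calibrate_endtime, warmup_hours=365):
    # read the flood-event windows (start time in column 0, end time in column 1)
    events = pd.read_excel(event_file)
    total, count = 0.0, 0
    for i in range(len(events)):
        if events.iloc[i, 0] < calibrate_endtime:
            # convert the event window to hourly indices relative to the calibration start,
            # shifted back by the warmup period that was cut from the simulated series
            start = int((events.iloc[i, 0] - calibrate_starttime - pd.Timedelta(hours=warmup_hours)) / pd.Timedelta(hours=1))
            end = int((events.iloc[i, 1] - calibrate_starttime - pd.Timedelta(hours=warmup_hours)) / pd.Timedelta(hours=1))
            total += rmse(evaluation[start:end], simulation[start:end])
            count += 1
    # average event-wise RMSE; lower is better for SCE-UA minimization
    return total / count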
23 changes: 19 additions & 4 deletions hydromodel/data/data_postprocess.py
@@ -34,15 +34,23 @@ def read_save_sceua_calibrated_params(basin_id, save_dir, sceua_calibrated_file_
"""
results = spotpy.analyser.load_csv_results(sceua_calibrated_file_name)
bestindex, bestobjf = spotpy.analyser.get_minlikeindex(
    results
)  # index of the entry with the minimum objective function in the results array
best_model_run = results[bestindex]
fields = [word for word in best_model_run.dtype.names if word.startswith("par")]
best_calibrate_params = pd.DataFrame(list(best_model_run[fields]))
save_file = os.path.join(save_dir, basin_id + "_calibrate_params.txt")
best_calibrate_params.to_csv(save_file, sep=",", index=False, header=True)
return np.array(best_calibrate_params).reshape(1, -1)  # return the best parameter set as a single row


def summarize_parameters(result_dir, model_info: dict):
@@ -214,8 +222,15 @@ def read_and_save_et_ouputs(result_dir, fold: int):
)
train_period = data_info_train["time"]
test_period = data_info_test["time"]
train_np_file = os.path.join(exp_dir, "data_info_fold" + str(fold) + "_train.npy")
test_np_file = os.path.join(exp_dir, "data_info_fold" + str(fold) + "_test.npy")
# TODO: the basins_lump_p_pe_q_fold file name needs to be unified
train_np_file = os.path.join(
    exp_dir, "data_info_fold" + str(fold) + "_train.npy"
)
test_np_file = os.path.join(
    exp_dir, "data_info_fold" + str(fold) + "_test.npy"
)
# train_np_file = os.path.join(exp_dir, f"basins_lump_p_pe_q_fold{fold}_train.npy")
# test_np_file = os.path.join(exp_dir, f"basins_lump_p_pe_q_fold{fold}_test.npy")
train_data = np.load(train_np_file)
test_data = np.load(test_np_file)
es_test = []
@@ -256,7 +271,7 @@
# "exp61561",
# "Dec08_11-38-48_LAPTOP-DNQOPPMS_fold1_HFsourcesrep1000ngs1000",
)
read_and_save_et_ouputs(one_model_one_hyperparam_setting_dir, fold=1)
read_and_save_et_ouputs(one_model_one_hyperparam_setting_dir, fold=0)
# summarize_parameters(one_model_one_hyperparam_setting_dir, {"name": "xaj_mz"})
# renormalize_params(one_model_one_hyperparam_setting_dir, {"name":"xaj_mz"})
# summarize_metrics(one_model_one_hyperparam_setting_dir,{"name":"xaj_mz"})
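As a usage note for the helper edited above, a finished calibration run can be post-processed roughly as follows; the basin id, output directory, and SCE-UA result file name are placeholders, and the import path simply mirrors this repository's layout.

import os
from hydromodel.data.data_postprocess import read_save_sceua_calibrated_params

save_dir = "results/exp_biliuhe"  # hypothetical directory holding the SPOTPY output
# SPOTPY's load_csv_results expects the file name without the ".csv" suffix
sceua_file = os.path.join(save_dir, "SCEUA_xaj_mz")
# writes <basin_id>_calibrate_params.txt into save_dir and returns a 1 x n_params array
best_params = read_save_sceua_calibrated_params("basin_001", save_dir, sceua_file)
print(best_params.shape)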