From 1536c53517ae0df0b8380e557be732470401939a Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Thu, 11 Jan 2024 07:46:39 +0000
Subject: [PATCH] remove the log_report_frequency from public access

---
 docs/source/configuration/configuration-run.rst  | 7 +------
 examples/peft_llm_gnn/nc_config_Video_Games.yaml | 3 ---
 python/graphstorm/gsf.py                         | 2 +-
 3 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/docs/source/configuration/configuration-run.rst b/docs/source/configuration/configuration-run.rst
index f0c1afa854..276e97d08d 100644
--- a/docs/source/configuration/configuration-run.rst
+++ b/docs/source/configuration/configuration-run.rst
@@ -126,11 +126,6 @@ GraphStorm provides a set of parameters to control how and where to save and res
   - Yaml: ``task_tracker: sagemaker_task_tracker``
   - Argument: ``--task_tracker sagemaker_task_tracker``
   - Default value: ``sagemaker_task_tracker``
-- **log_report_frequency**: The frequency of reporting model performance metrics through task_tracker. The frequency is defined by using number of iterations, i.e., every N iterations the evaluation metrics will be reported. (Please note the evaluation metrics should be generated at the reporting iteration. See "eval_frequency" for how evaluation frequency is controlled.)
-
-  - Yaml: ``log_report_frequency: 1000``
-  - Argument: ``--log-report-frequency 1000``
-  - Default value: ``1000``
 - **restore_model_path**: A path where GraphStorm model parameters were saved. For training, if restore_model_path is set, GraphStom will retrieve the model parameters from restore_model_path instead of initializing the parameters. For inference, restore_model_path must be provided.

   - Yaml: ``restore_model_path: /model/checkpoint/``
@@ -278,7 +273,7 @@ GraphStorm provides a set of parameters to control model evaluation.
   - Yaml: ``use_mini_batch_infer: false``
   - Argument: ``--use-mini-batch-infer false``
   - Default value: ``true``
-- **eval_frequency**: The frequency of doing evaluation. GraphStorm trainers do evaluation at the end of each epoch. However, for large-scale graphs, training one epoch may take hundreds of thousands of iterations. One may want to do evaluations in the middle of an epoch. When eval_frequency is set, every **eval_frequency** iterations, the trainer will do evaluation once. The evaluation results can be printed and reported. See **log_report_frequency** for more details.
+- **eval_frequency**: The frequency of doing evaluation. GraphStorm trainers do evaluation at the end of each epoch. However, for large-scale graphs, training one epoch may take hundreds of thousands of iterations. One may want to do evaluations in the middle of an epoch. When eval_frequency is set, every **eval_frequency** iterations, the trainer will do evaluation once. The evaluation results can be printed and reported.
   - Yaml: ``eval_frequency: 10000``
   - Argument: ``--eval-frequency 10000``

diff --git a/examples/peft_llm_gnn/nc_config_Video_Games.yaml b/examples/peft_llm_gnn/nc_config_Video_Games.yaml
index 6a2d0129dd..626553d8c9 100644
--- a/examples/peft_llm_gnn/nc_config_Video_Games.yaml
+++ b/examples/peft_llm_gnn/nc_config_Video_Games.yaml
@@ -19,11 +19,8 @@ gsf:
     batch_size: 4
     dropout: 0.1
     eval_batch_size: 4
-    # eval_frequency: 100
-    #log_report_frequency: 50
     lr: 0.0001
     num_epochs: 10
-    # save_model_frequency: 300
     wd_l2norm: 1.0e-06
   input:
     restore_model_path: null
diff --git a/python/graphstorm/gsf.py b/python/graphstorm/gsf.py
index 1dfd3cf780..97dabd1164 100644
--- a/python/graphstorm/gsf.py
+++ b/python/graphstorm/gsf.py
@@ -656,4 +656,4 @@ def check_homo(g):

 def create_builtin_task_tracker(config):
     tracker_class = get_task_tracker_class(config.task_tracker)
-    return tracker_class(config.log_report_frequency)
+    return tracker_class(config.eval_frequency)
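
A minimal sketch of what the gsf.py change amounts to: the builtin task tracker is now constructed with ``eval_frequency``, so metrics are reported at the evaluation interval rather than at a separate ``log_report_frequency``. In the sketch below, ``_DemoTracker`` and ``_demo_get_task_tracker_class()`` are hypothetical stand-ins for GraphStorm internals; only ``config.task_tracker``, ``config.eval_frequency``, and the two-line body of ``create_builtin_task_tracker()`` come from the patch itself.

    from types import SimpleNamespace


    class _DemoTracker:
        """Hypothetical tracker that just records its report frequency."""
        def __init__(self, report_frequency):
            self.report_frequency = report_frequency


    def _demo_get_task_tracker_class(name):
        """Stand-in for GraphStorm's tracker-class lookup."""
        return _DemoTracker


    def create_builtin_task_tracker(config):
        # Same shape as the patched gsf.py: the tracker now takes eval_frequency.
        tracker_class = _demo_get_task_tracker_class(config.task_tracker)
        return tracker_class(config.eval_frequency)


    config = SimpleNamespace(task_tracker="sagemaker_task_tracker", eval_frequency=10000)
    tracker = create_builtin_task_tracker(config)
    print(tracker.report_frequency)  # 10000: reporting now follows eval_frequency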