From b0eae5a5720a733ffe7a052681abd4c267f78abd Mon Sep 17 00:00:00 2001 From: "askerosted@gmail.com" Date: Tue, 22 Oct 2024 14:09:48 +0900 Subject: [PATCH 1/3] add_comment --- examples/04_training/01_train_dynedge.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/examples/04_training/01_train_dynedge.py b/examples/04_training/01_train_dynedge.py index c61df789b..efd6c2c53 100644 --- a/examples/04_training/01_train_dynedge.py +++ b/examples/04_training/01_train_dynedge.py @@ -81,10 +81,14 @@ def main( # Log configuration to W&B wandb_logger.experiment.config.update(config) - # Define graph representation + # Define graph/data representation, here the KNNGraph is used. + # The KNNGraph is a graph representation which uses the KNNEdges edge definition with 8 neighbours as default. + # The graph representation is defined by the detector, in this case the Prometheus detector. + # The standard node definition is used, which is NodesAsPulses. graph_definition = KNNGraph(detector=Prometheus()) - # Use GraphNetDataModule to load in data + # Use GraphNetDataModule to load in data and create dataloaders + # The input here depends on the dataset being used, in this case the Prometheus dataset. dm = GraphNeTDataModule( dataset_reference=config["dataset_reference"], dataset_args={ @@ -109,11 +113,16 @@ def main( validation_dataloader = dm.val_dataloader # Building model - + # Define the backbone architecture, in this example we use the DynEdge architecture + # described in detail in the Jinst paper: https://iopscience.iop.org/article/10.1088/1748-0221/17/11/P11003 backbone = DynEdge( nb_inputs=graph_definition.nb_outputs, global_pooling_schemes=["min", "max", "mean", "sum"], ) + # Define the task, loss function as well as optional transformation. + # In this case we are performing energy reconstruction, with a LogCoshLoss function. + # The target and prediction are transformed using the log10 function. 
When infering + # the prediction is transformed back to the original scale using 10^x. task = EnergyReconstruction( hidden_size=backbone.nb_outputs, target_labels=config["target"], @@ -121,6 +130,8 @@ def main( transform_prediction_and_target=lambda x: torch.log10(x), transform_inference=lambda x: torch.pow(10, x), ) + # Define the full model, which includes the backbone, task(s) along with typical + # machine learning options such as learning rate optimizer and scheduler. model = StandardModel( graph_definition=graph_definition, backbone=backbone, From 1993eae3e2334e4d8256fe965f1d9b0f97ce216e Mon Sep 17 00:00:00 2001 From: "askerosted@gmail.com" Date: Tue, 22 Oct 2024 14:51:53 +0900 Subject: [PATCH 2/3] slight_adjust --- examples/04_training/01_train_dynedge.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/04_training/01_train_dynedge.py b/examples/04_training/01_train_dynedge.py index efd6c2c53..3e31fc126 100644 --- a/examples/04_training/01_train_dynedge.py +++ b/examples/04_training/01_train_dynedge.py @@ -113,13 +113,14 @@ def main( validation_dataloader = dm.val_dataloader # Building model - # Define the backbone architecture, in this example we use the DynEdge architecture + + # Define architecture of the backbone, in this example we use the DynEdge architecture # described in detail in the Jinst paper: https://iopscience.iop.org/article/10.1088/1748-0221/17/11/P11003 backbone = DynEdge( nb_inputs=graph_definition.nb_outputs, global_pooling_schemes=["min", "max", "mean", "sum"], ) - # Define the task, loss function as well as optional transformation. + # Define the task. # In this case we are performing energy reconstruction, with a LogCoshLoss function. # The target and prediction are transformed using the log10 function. When infering # the prediction is transformed back to the original scale using 10^x. 
From 3cde04d97b8e5a4b9edaa082f7fb642013fea50f Mon Sep 17 00:00:00 2001 From: "askerosted@gmail.com" Date: Wed, 23 Oct 2024 09:38:00 +0900 Subject: [PATCH 3/3] code climate fixes --- examples/04_training/01_train_dynedge.py | 26 +++++++++++++++--------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/examples/04_training/01_train_dynedge.py b/examples/04_training/01_train_dynedge.py index 3e31fc126..b8a8e82b7 100644 --- a/examples/04_training/01_train_dynedge.py +++ b/examples/04_training/01_train_dynedge.py @@ -82,13 +82,16 @@ def main( wandb_logger.experiment.config.update(config) # Define graph/data representation, here the KNNGraph is used. - # The KNNGraph is a graph representation which uses the KNNEdges edge definition with 8 neighbours as default. - # The graph representation is defined by the detector, in this case the Prometheus detector. + # The KNNGraph is a graph representation, which uses the + # KNNEdges edge definition with 8 neighbours as default. + # The graph representation is defined by the detector, + # in this case the Prometheus detector. # The standard node definition is used, which is NodesAsPulses. graph_definition = KNNGraph(detector=Prometheus()) # Use GraphNetDataModule to load in data and create dataloaders - # The input here depends on the dataset being used, in this case the Prometheus dataset. + # The input here depends on the dataset being used, + # in this case the Prometheus dataset. dm = GraphNeTDataModule( dataset_reference=config["dataset_reference"], dataset_args={ @@ -114,16 +117,18 @@ def main( # Building model - # Define architecture of the backbone, in this example we use the DynEdge architecture - # described in detail in the Jinst paper: https://iopscience.iop.org/article/10.1088/1748-0221/17/11/P11003 + # Define architecture of the backbone, in this example + # the DynEdge architecture is used. 
+ # https://iopscience.iop.org/article/10.1088/1748-0221/17/11/P11003 backbone = DynEdge( nb_inputs=graph_definition.nb_outputs, global_pooling_schemes=["min", "max", "mean", "sum"], ) # Define the task. - # In this case we are performing energy reconstruction, with a LogCoshLoss function. - # The target and prediction are transformed using the log10 function. When infering - # the prediction is transformed back to the original scale using 10^x. + # Here an energy reconstruction with a LogCoshLoss function. + # The target and prediction are transformed using the log10 function. + # When inferring, the prediction is transformed back to the + # original scale using 10^x. task = EnergyReconstruction( hidden_size=backbone.nb_outputs, target_labels=config["target"], @@ -131,8 +136,9 @@ def main( transform_prediction_and_target=lambda x: torch.log10(x), transform_inference=lambda x: torch.pow(10, x), ) - # Define the full model, which includes the backbone, task(s) along with typical - # machine learning options such as learning rate optimizer and scheduler. + # Define the full model, which includes the backbone, task(s), + # along with typical machine learning options such as + # learning rate optimizers and schedulers. model = StandardModel( graph_definition=graph_definition, backbone=backbone,