From 6a06d654a48563b8d70f0297412b08d2bff57c64 Mon Sep 17 00:00:00 2001 From: Rasmus Oersoe Date: Mon, 20 May 2024 16:37:34 +0200 Subject: [PATCH 01/32] revert changes on main --- src/graphnet/data/dataconverter.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/graphnet/data/dataconverter.py b/src/graphnet/data/dataconverter.py index a08f6946b..bf57fac89 100644 --- a/src/graphnet/data/dataconverter.py +++ b/src/graphnet/data/dataconverter.py @@ -1,5 +1,4 @@ """Contains `DataConverter`.""" - from typing import List, Union, OrderedDict, Dict, Tuple, Any, Optional, Type from abc import abstractmethod, ABC @@ -103,7 +102,8 @@ def __call__(self, input_dir: Union[str, List[str]]) -> None: self._output_files = [ os.path.join( self._output_dir, - self._create_file_name(file) + self._save_method.file_extension, + self._create_file_name(file) + + self._save_method.file_extension, ) for file in input_files ] @@ -263,7 +263,9 @@ def _request_event_nos(self, n_ids: int) -> List[int]: global_index.value += n_ids # type: ignore[name-defined] else: starting_index = self._index - event_nos = np.arange(starting_index, starting_index + n_ids, 1).tolist() + event_nos = np.arange( + starting_index, starting_index + n_ids, 1 + ).tolist() self._index += n_ids return event_nos @@ -318,7 +320,9 @@ def _update_shared_variables( self._output_files.extend(list(sorted(output_files[:]))) @final - def merge_files(self, files: Optional[List[str]] = None, **kwargs: Any) -> None: + def merge_files( + self, files: Optional[List[str]] = None, **kwargs: Any + ) -> None: """Merge converted files. `DataConverter` will call the `.merge_files` method in the @@ -333,8 +337,6 @@ def merge_files(self, files: Optional[List[str]] = None, **kwargs: Any) -> None: files_to_merge = self._output_files elif files is not None: # Proceed to merge specified by user. 
- if isinstance(files, str): - files = [files] # Cast to list if user forgot files_to_merge = files else: # Raise error From c1b40998b07cecb7cbcbb65351e4ba275f0aef9c Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 28 May 2024 19:24:09 +0200 Subject: [PATCH 02/32] add NormalizingFlow --- src/graphnet/models/__init__.py | 1 + src/graphnet/models/easy_model.py | 4 + .../models/graphs/graph_definition.py | 21 +++- src/graphnet/models/graphs/graphs.py | 4 + src/graphnet/models/normalizing_flow.py | 115 ++++++++++++++++++ src/graphnet/models/task/task.py | 102 +++++++++------- src/graphnet/models/utils.py | 9 ++ 7 files changed, 209 insertions(+), 47 deletions(-) create mode 100644 src/graphnet/models/normalizing_flow.py diff --git a/src/graphnet/models/__init__.py b/src/graphnet/models/__init__.py index a2e63befb..a7e0a064b 100644 --- a/src/graphnet/models/__init__.py +++ b/src/graphnet/models/__init__.py @@ -11,3 +11,4 @@ from .model import Model from .standard_model import StandardModel from .standard_averaged_model import StandardAveragedModel +from .normalizing_flow import NormalizingFlow \ No newline at end of file diff --git a/src/graphnet/models/easy_model.py b/src/graphnet/models/easy_model.py index d26d88fa0..b1c51c087 100644 --- a/src/graphnet/models/easy_model.py +++ b/src/graphnet/models/easy_model.py @@ -292,6 +292,7 @@ def predict( dataloader: DataLoader, gpus: Optional[Union[List[int], int]] = None, distribution_strategy: Optional[str] = "auto", + **trainer_kwargs, ) -> List[Tensor]: """Return predictions for `dataloader`.""" self.inference() @@ -305,6 +306,7 @@ def predict( gpus=gpus, distribution_strategy=distribution_strategy, callbacks=callbacks, + **trainer_kwargs, ) predictions_list = inference_trainer.predict(self, dataloader) @@ -325,6 +327,7 @@ def predict_as_dataframe( additional_attributes: Optional[List[str]] = None, gpus: Optional[Union[List[int], int]] = None, distribution_strategy: Optional[str] = "auto", + **trainer_kwargs, ) -> pd.DataFrame: """Return predictions for `dataloader` as a DataFrame. @@ -357,6 +360,7 @@ def predict_as_dataframe( dataloader=dataloader, gpus=gpus, distribution_strategy=distribution_strategy, + **trainer_kwargs, ) predictions = ( torch.cat(predictions_torch, dim=1).detach().cpu().numpy() ) diff --git a/src/graphnet/models/graphs/graph_definition.py b/src/graphnet/models/graphs/graph_definition.py index e384425f9..6c9a0a419 100644 --- a/src/graphnet/models/graphs/graph_definition.py +++ b/src/graphnet/models/graphs/graph_definition.py @@ -34,6 +34,7 @@ def __init__( sensor_mask: Optional[List[int]] = None, string_mask: Optional[List[int]] = None, sort_by: str = None, + repeat_labels: bool =False, ): """Construct ´GraphDefinition´. The ´detector´ holds. @@ -62,9 +63,14 @@ def __init__( add_inactive_sensors: If True, inactive sensors will be appended to the graph with padded pulse information. Defaults to False. sensor_mask: A list of sensor id's to be masked from the graph. Any - sensor listed here will be removed from the graph. Defaults to None. - string_mask: A list of string id's to be masked from the graph. Defaults to None. + sensor listed here will be removed from the graph. + Defaults to None. + string_mask: A list of string id's to be masked from the graph. + Defaults to None. sort_by: Name of node feature to sort by. Defaults to None. + repeat_labels: If True, labels will be repeated to match the + number of rows in the output of the GraphDefinition. + Defaults to False.
""" # Base class constructor super().__init__(name=__name__, class_name=self.__class__.__name__) @@ -80,6 +86,7 @@ def __init__( self._sensor_mask = sensor_mask self._string_mask = string_mask self._add_inactive_sensors = add_inactive_sensors + self._repeat_labels = repeat_labels self._resolve_masks() @@ -411,7 +418,10 @@ def _add_truth( for truth_dict in truth_dicts: for key, value in truth_dict.items(): try: - graph[key] = torch.tensor(value) + label = torch.tensor(value) + if self._repeat_labels: + label = label.repeat(graph.x.shape[0],1) + graph[key] = label except TypeError: # Cannot convert `value` to Tensor due to its data type, # e.g. `str`. @@ -448,5 +458,8 @@ def _add_custom_labels( ) -> Data: # Add custom labels to the graph for key, fn in custom_label_functions.items(): - graph[key] = fn(graph) + label = fn(graph) + if self._repeat_labels: + label = label.repeat(graph.x.shape[0],1) + graph[key] = label return graph diff --git a/src/graphnet/models/graphs/graphs.py b/src/graphnet/models/graphs/graphs.py index 0289b943d..6e2ac086d 100644 --- a/src/graphnet/models/graphs/graphs.py +++ b/src/graphnet/models/graphs/graphs.py @@ -23,6 +23,7 @@ def __init__( seed: Optional[Union[int, Generator]] = None, nb_nearest_neighbours: int = 8, columns: List[int] = [0, 1, 2], + **kwargs ) -> None: """Construct k-nn graph representation. @@ -53,6 +54,7 @@ def __init__( input_feature_names=input_feature_names, perturbation_dict=perturbation_dict, seed=seed, + **kwargs ) @@ -70,6 +72,7 @@ def __init__( dtype: Optional[torch.dtype] = torch.float, perturbation_dict: Optional[Dict[str, float]] = None, seed: Optional[Union[int, Generator]] = None, + **kwargs ) -> None: """Construct isolated nodes graph representation. @@ -94,4 +97,5 @@ def __init__( input_feature_names=input_feature_names, perturbation_dict=perturbation_dict, seed=seed, + **kwargs ) diff --git a/src/graphnet/models/normalizing_flow.py b/src/graphnet/models/normalizing_flow.py new file mode 100644 index 000000000..42266f79c --- /dev/null +++ b/src/graphnet/models/normalizing_flow.py @@ -0,0 +1,115 @@ +"""Standard model class(es).""" + +from typing import Any, Dict, List, Optional, Union, Type +import torch +from torch import Tensor +from torch_geometric.data import Data +from torch.optim import Adam + +from graphnet.models.gnn.gnn import GNN +from .easy_model import EasySyntax +from graphnet.models.task import StandardFlowTask +from graphnet.models.graphs import GraphDefinition +from graphnet.models.utils import get_fields + + +class NormalizingFlow(EasySyntax): + """A Standard way of combining model components in GraphNeT. + + This model is compatible with the vast majority of supervised learning + tasks such as regression, binary and multi-label classification. + + Capable of producing both event-level and pulse-level predictions. 
+ """ + + def __init__( + self, + graph_definition: GraphDefinition, + target_labels: str, + backbone: GNN = None, + condition_on: Union[str, List[str], None] = None, + flow_layers: str = 'gggt', + optimizer_class: Type[torch.optim.Optimizer] = Adam, + optimizer_kwargs: Optional[Dict] = None, + scheduler_class: Optional[type] = None, + scheduler_kwargs: Optional[Dict] = None, + scheduler_config: Optional[Dict] = None, + ) -> None: + """Construct `NormalizingFlow`.""" + + # Handle args + if backbone is not None: + assert isinstance(backbone, GNN) + hidden_size = backbone.nb_outputs + else: + if isinstance(condition_on, str): + condition_on = [condition_on] + hidden_size = len(condition_on) + + # Build Flow Task + task = StandardFlowTask(hidden_size=hidden_size, + flow_layers=flow_layers, + target_labels = target_labels) + + + # Base class constructor + super().__init__( + tasks=task, + optimizer_class=optimizer_class, + optimizer_kwargs=optimizer_kwargs, + scheduler_class=scheduler_class, + scheduler_kwargs=scheduler_kwargs, + scheduler_config=scheduler_config, + ) + + # Member variable(s) + self._graph_definition = graph_definition + self.backbone = backbone + self._condition_on = condition_on + + def forward( + self, data: Union[Data, List[Data]] + ) -> List[Union[Tensor, Data]]: + """Forward pass, chaining model components.""" + if self.backbone is not None: + x = self._backbone(data) + elif self._condition_on is not None: + x = get_fields(data = data, + fields = self._condition_on) + return self._tasks[0](x, data) + + def _backbone( + self, data: Union[Data, List[Data]] + ) -> List[Union[Tensor, Data]]: + if isinstance(data, Data): + data = [data] + x_list = [] + for d in data: + x = self.backbone(d) + x_list.append(x) + x = torch.cat(x_list, dim=0) + return x + + + def shared_step(self, batch: List[Data], batch_idx: int) -> Tensor: + """Perform shared step. + + Applies the forward pass and the following loss calculation, shared + between the training and validation step. 
+ """ + loss = self(batch) + return torch.mean(loss, dim = 0) + + def validate_tasks(self) -> None: + """Verify that self._tasks contain compatible elements.""" + accepted_tasks = (StandardFlowTask) + for task in self._tasks: + assert isinstance(task, accepted_tasks) + + def sample(self, data, n_samples, target_range = [0,1000]): + self._sample = True + self._n_samples = n_samples + self._target_range = target_range + labels, nllh = self(data) + self._sample = False + return labels, nllh diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py index cd750f35d..b33636d11 100644 --- a/src/graphnet/models/task/task.py +++ b/src/graphnet/models/task/task.py @@ -4,11 +4,14 @@ from typing import Any, TYPE_CHECKING, List, Tuple, Union from typing import Callable, Optional import numpy as np +from copy import deepcopy import torch from torch import Tensor from torch.nn import Linear from torch_geometric.data import Data +import jammy_flows +from torch.distributions.uniform import Uniform if TYPE_CHECKING: # Avoid cyclic dependency @@ -16,6 +19,7 @@ from graphnet.models import Model from graphnet.utilities.decorators import final +from graphnet.models.utils import get_fields class Task(Model): @@ -39,7 +43,6 @@ def default_prediction_labels(self) -> List[str]: def __init__( self, *, - loss_function: "LossFunction", target_labels: Optional[Union[str, List[str]]] = None, prediction_labels: Optional[Union[str, List[str]]] = None, transform_prediction_and_target: Optional[Callable] = None, @@ -51,7 +54,6 @@ def __init__( """Construct `Task`. Args: - loss_function: Loss function appropriate to the task. target_labels: Name(s) of the quantity/-ies being predicted, used to extract the target tensor(s) from the `Data` object in `.compute_loss(...)`. @@ -101,7 +103,6 @@ def __init__( self._regularisation_loss: Optional[float] = None self._target_labels = target_labels self._prediction_labels = prediction_labels - self._loss_function = loss_function self._inference = False self._loss_weight = loss_weight @@ -229,6 +230,7 @@ class LearnedTask(Task): def __init__( self, hidden_size: int, + loss_function: "LossFunction", **task_kwargs: Any, ): """Construct `LearnedTask`. @@ -237,11 +239,14 @@ def __init__( hidden_size: The number of columns in the output of the last latent layer of `Model` using this Task. Available through `Model.nb_outputs` + loss_function: Loss function appropriate to the task. + """ # Base class constructor super().__init__(**task_kwargs) # Mapping from last hidden layer to required size of input + self._loss_function = loss_function self._affine = Linear(hidden_size, self.nb_inputs) @abstractmethod @@ -384,62 +389,73 @@ class StandardFlowTask(Task): def __init__( self, - target_labels: List[str], + hidden_size: Union[int, None], + flow_layers: str = "gggt", **task_kwargs: Any, ): """Construct `StandardLearnedTask`. Args: target_labels: A list of names for the targets of this Task. + flow_layers: A string indicating the flow layer types. hidden_size: The number of columns in the output of the last latent layer of `Model` using this Task. 
- Available through `Model.nb_outputs` + Available through `Model.nb_outputs` """ # Base class constructor - super().__init__(target_labels=target_labels, **task_kwargs) + + + # Member variables + self._default_prediction_labels = ["nllh"] + self._hidden_size = hidden_size + super().__init__(**task_kwargs) + self._flow = jammy_flows.pdf(f"e{len(self._target_labels)}", + flow_layers, + conditional_input_dim = hidden_size) + self._initialized = False + + @property + def default_prediction_labels(self) -> List[str]: + """Return default prediction labels.""" + return self._default_prediction_labels def nb_inputs(self) -> int: """Return number of inputs assumed by task.""" - return len(self._target_labels) - - def _forward(self, x: Tensor, jacobian: Tensor) -> Tensor: # type: ignore - # Leave it as is. - return x + return self._hidden_size + + def _forward(self, x: Tensor, y: Tensor) -> Tensor: # type: ignore + if x is not None: + if x.shape[0] != y.shape[0]: + raise AssertionError(f"Targets {self._target_labels} have " + f"{y.shape[0]} rows while conditional " + f"inputs have {x.shape[0]} rows. " + "The number of rows must match.") + log_pdf, _,_ = self._flow(y, conditional_input = x) + else: + log_pdf, _,_ = self._flow(y) + return -log_pdf.reshape(-1,1) @final def forward( - self, x: Union[Tensor, Data], jacobian: Optional[Tensor] - ) -> Union[Tensor, Data]: + self, x: Union[Tensor, Data], data: List[Data]) -> Union[Tensor, Data]: """Forward pass.""" - self._regularisation_loss = 0 # Reset - x = self._forward(x, jacobian) + # Manually cast pdf to correct dtype - is there a better way? + self._flow = self._flow.to(x.dtype) + # Get target values + labels = get_fields(data = data, + fields = self._target_labels).to(x.dtype) + # Set the initial parameters of flow close to truth + # This speeds up training and helps with NaN + if self._initialized is False: + self._flow.init_params(data=deepcopy(labels).cpu()) + self._flow.to(self.device) + self._initialized = True # This is only done once + # Compute nllh + x = self._forward(x, labels) return self._transform_prediction(x) - @final - def compute_loss( - self, prediction: Tensor, jacobian: Tensor, data: Data - ) -> Tensor: - """Compute loss for normalizing flow tasks. - - Args: - prediction: transformed sample in latent distribution space. - jacobian: the jacobian associated with the transformation. - data: the graph object. - - Returns: - the loss associated with the transformation. 
- """ - if self._loss_weight is not None: - weights = data[self._loss_weight] - else: - weights = None - loss = ( - self._loss_function( - prediction=prediction, - jacobian=jacobian, - weights=weights, - target=None, - ) - + self._regularisation_loss - ) - return loss + def sample(self, x, data, n_samples, target_range): + self.inference() + with torch.no_grad(): + labels = Uniform(target_range[0], target_range[1]).sample((n_samples, 1)) + return labels, self._forward(y= labels, x = x.repeat(n_samples,1)) diff --git a/src/graphnet/models/utils.py b/src/graphnet/models/utils.py index d05e8223f..73a4f56f3 100644 --- a/src/graphnet/models/utils.py +++ b/src/graphnet/models/utils.py @@ -7,6 +7,7 @@ from torch import Tensor, LongTensor from torch_geometric.utils import homophily +from torch_geometric.data import Data def calculate_xyzt_homophily( @@ -103,3 +104,11 @@ def array_to_sequence( mask = torch.ne(x[:, :, 1], excluding_value) x[~mask] = padding_value return x, mask, seq_length + +def get_fields(data: List[Data], fields: List[str]) -> Tensor: + labels = [] + if not isinstance(data, list): + data = [data] + for label in list(fields): + labels.append(torch.cat([d[label].reshape(-1,1) for d in data], dim=0)) + return torch.cat(labels, dim = 1) \ No newline at end of file From 1e14f543984dbd3f86550bf380a08c5870c52e20 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 28 May 2024 19:52:31 +0200 Subject: [PATCH 03/32] check --- .pre-commit-config.yaml | 4 ++++ src/graphnet/models/task/task.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4794b3745..fd6bae19e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,16 +10,20 @@ repos: rev: 4.0.1 hooks: - id: flake8 + language_version: python3 - repo: https://github.com/pycqa/docformatter rev: v1.5.0 hooks: - id: docformatter + language_version: python3 - repo: https://github.com/pycqa/pydocstyle rev: 6.1.1 hooks: - id: pydocstyle + language_version: python3 - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 hooks: - id: mypy args: [--follow-imports=silent, --disallow-untyped-defs, --disallow-incomplete-defs, --disallow-untyped-calls] + language_version: python3 \ No newline at end of file diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py index b33636d11..df6d81948 100644 --- a/src/graphnet/models/task/task.py +++ b/src/graphnet/models/task/task.py @@ -454,7 +454,7 @@ def forward( x = self._forward(x, labels) return self._transform_prediction(x) - def sample(self, x, data, n_samples, target_range): + def sample(self, x, data: int, n_samples, target_range): self.inference() with torch.no_grad(): labels = Uniform(target_range[0], target_range[1]).sample((n_samples, 1)) From 0c135b72302ee65505846050ebe8c3e50e0b0e97 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 28 May 2024 19:54:02 +0200 Subject: [PATCH 04/32] hooks --- src/graphnet/models/task/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py index df6d81948..d69412daa 100644 --- a/src/graphnet/models/task/task.py +++ b/src/graphnet/models/task/task.py @@ -454,7 +454,7 @@ def forward( x = self._forward(x, labels) return self._transform_prediction(x) - def sample(self, x, data: int, n_samples, target_range): + def sample(self, x, data: float, n_samples, target_range): self.inference() with torch.no_grad(): labels = Uniform(target_range[0], 
target_range[1]).sample((n_samples, 1)) From ce1223d853306855eb4a739979255646171614d0 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 28 May 2024 19:57:31 +0200 Subject: [PATCH 05/32] hooks --- src/graphnet/models/task/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py index d69412daa..df6d81948 100644 --- a/src/graphnet/models/task/task.py +++ b/src/graphnet/models/task/task.py @@ -454,7 +454,7 @@ def forward( x = self._forward(x, labels) return self._transform_prediction(x) - def sample(self, x, data: float, n_samples, target_range): + def sample(self, x, data: int, n_samples, target_range): self.inference() with torch.no_grad(): labels = Uniform(target_range[0], target_range[1]).sample((n_samples, 1)) From 88863769bc1a45a71c4917e329a9aa4a0b2b3ca7 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 28 May 2024 21:37:01 +0200 Subject: [PATCH 06/32] hooks --- src/graphnet/models/task/task.py | 48 +++++++++++++++----------------- 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py index df6d81948..bb1842191 100644 --- a/src/graphnet/models/task/task.py +++ b/src/graphnet/models/task/task.py @@ -240,7 +240,6 @@ def __init__( the last latent layer of `Model` using this Task. Available through `Model.nb_outputs` loss_function: Loss function appropriate to the task. - """ # Base class constructor super().__init__(**task_kwargs) @@ -400,18 +399,19 @@ def __init__( flow_layers: A string indicating the flow layer types. hidden_size: The number of columns in the output of the last latent layer of `Model` using this Task. - Available through `Model.nb_outputs` + Available through `Model.nb_outputs` """ # Base class constructor - - + # Member variables self._default_prediction_labels = ["nllh"] self._hidden_size = hidden_size super().__init__(**task_kwargs) - self._flow = jammy_flows.pdf(f"e{len(self._target_labels)}", - flow_layers, - conditional_input_dim = hidden_size) + self._flow = jammy_flows.pdf( + f"e{len(self._target_labels)}", + flow_layers, + conditional_input_dim=hidden_size, + ) self._initialized = False @property @@ -419,43 +419,39 @@ def default_prediction_labels(self) -> List[str]: """Return default prediction labels.""" return self._default_prediction_labels - def nb_inputs(self) -> int: - """Return number of inputs assumed by task.""" + def nb_inputs(self) -> Union[int, None]: # type: ignore + """Return number of conditional inputs assumed by task.""" return self._hidden_size def _forward(self, x: Tensor, y: Tensor) -> Tensor: # type: ignore if x is not None: if x.shape[0] != y.shape[0]: - raise AssertionError(f"Targets {self._target_labels} have " - f"{y.shape[0]} rows while conditional " - f"inputs have {x.shape[0]} rows. " - "The number of rows must match.") - log_pdf, _,_ = self._flow(y, conditional_input = x) + raise AssertionError( + f"Targets {self._target_labels} have " + f"{y.shape[0]} rows while conditional " + f"inputs have {x.shape[0]} rows. " + "The number of rows must match." 
+ ) + log_pdf, _, _ = self._flow(y, conditional_input=x) else: - log_pdf, _,_ = self._flow(y) - return -log_pdf.reshape(-1,1) + log_pdf, _, _ = self._flow(y) + return -log_pdf.reshape(-1, 1) @final def forward( - self, x: Union[Tensor, Data], data: List[Data]) -> Union[Tensor, Data]: + self, x: Union[Tensor, Data], data: List[Data] + ) -> Union[Tensor, Data]: """Forward pass.""" # Manually cast pdf to correct dtype - is there a better way? self._flow = self._flow.to(x.dtype) # Get target values - labels = get_fields(data = data, - fields = self._target_labels).to(x.dtype) + labels = get_fields(data=data, fields=self._target_labels).to(x.dtype) # Set the initial parameters of flow close to truth # This speeds up training and helps with NaN if self._initialized is False: self._flow.init_params(data=deepcopy(labels).cpu()) self._flow.to(self.device) - self._initialized = True # This is only done once + self._initialized = True # This is only done once # Compute nllh x = self._forward(x, labels) return self._transform_prediction(x) - - def sample(self, x, data: int, n_samples, target_range): - self.inference() - with torch.no_grad(): - labels = Uniform(target_range[0], target_range[1]).sample((n_samples, 1)) - return labels, self._forward(y= labels, x = x.repeat(n_samples,1)) From 9d8b5608e873865f3a0fe5e6eb877d2c3126ed53 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 28 May 2024 21:39:11 +0200 Subject: [PATCH 07/32] black --- src/graphnet/models/easy_model.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/graphnet/models/easy_model.py b/src/graphnet/models/easy_model.py index b1c51c087..d3ed4f419 100644 --- a/src/graphnet/models/easy_model.py +++ b/src/graphnet/models/easy_model.py @@ -16,7 +16,6 @@ from pytorch_lightning.loggers import Logger as LightningLogger from graphnet.training.callbacks import ProgressBar -from graphnet.models.graphs import GraphDefinition from graphnet.models.model import Model from graphnet.models.task import StandardLearnedTask @@ -292,7 +291,7 @@ def predict( dataloader: DataLoader, gpus: Optional[Union[List[int], int]] = None, distribution_strategy: Optional[str] = "auto", - **trainer_kwargs, + **trainer_kwargs: Any, ) -> List[Tensor]: """Return predictions for `dataloader`.""" self.inference() @@ -327,7 +326,7 @@ def predict_as_dataframe( additional_attributes: Optional[List[str]] = None, gpus: Optional[Union[List[int], int]] = None, distribution_strategy: Optional[str] = "auto", - **trainer_kwargs, + **trainer_kwargs: Any, ) -> pd.DataFrame: """Return predictions for `dataloader` as a DataFrame. 
From dbb02c4042fa2ca28d126c766690a6f79cc7dec0 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 28 May 2024 21:45:38 +0200 Subject: [PATCH 08/32] black --- src/graphnet/models/normalizing_flow.py | 34 ++++++++++--------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/src/graphnet/models/normalizing_flow.py b/src/graphnet/models/normalizing_flow.py index 42266f79c..f84caa881 100644 --- a/src/graphnet/models/normalizing_flow.py +++ b/src/graphnet/models/normalizing_flow.py @@ -28,7 +28,7 @@ def __init__( target_labels: str, backbone: GNN = None, condition_on: Union[str, List[str], None] = None, - flow_layers: str = 'gggt', + flow_layers: str = "gggt", optimizer_class: Type[torch.optim.Optimizer] = Adam, optimizer_kwargs: Optional[Dict] = None, scheduler_class: Optional[type] = None, @@ -36,21 +36,23 @@ def __init__( scheduler_config: Optional[Dict] = None, ) -> None: """Construct `NormalizingFlow`.""" - # Handle args if backbone is not None: assert isinstance(backbone, GNN) hidden_size = backbone.nb_outputs - else: + elif condition_on is not None: if isinstance(condition_on, str): condition_on = [condition_on] hidden_size = len(condition_on) + else: + hidden_size = None # Build Flow Task - task = StandardFlowTask(hidden_size=hidden_size, - flow_layers=flow_layers, - target_labels = target_labels) - + task = StandardFlowTask( + hidden_size=hidden_size, + flow_layers=flow_layers, + target_labels=target_labels, + ) # Base class constructor super().__init__( @@ -74,13 +76,14 @@ def forward( if self.backbone is not None: x = self._backbone(data) elif self._condition_on is not None: - x = get_fields(data = data, - fields = self._condition_on) + assert isinstance(self._condition_on, list) + x = get_fields(data=data, fields=self._condition_on) return self._tasks[0](x, data) def _backbone( self, data: Union[Data, List[Data]] ) -> List[Union[Tensor, Data]]: + assert self.backbone is not None if isinstance(data, Data): data = [data] x_list = [] @@ -89,7 +92,6 @@ def _backbone( x_list.append(x) x = torch.cat(x_list, dim=0) return x - def shared_step(self, batch: List[Data], batch_idx: int) -> Tensor: """Perform shared step. @@ -98,18 +100,10 @@ def shared_step(self, batch: List[Data], batch_idx: int) -> Tensor: between the training and validation step. 
""" loss = self(batch) - return torch.mean(loss, dim = 0) + return torch.mean(loss, dim=0) def validate_tasks(self) -> None: """Verify that self._tasks contain compatible elements.""" - accepted_tasks = (StandardFlowTask) + accepted_tasks = StandardFlowTask for task in self._tasks: assert isinstance(task, accepted_tasks) - - def sample(self, data, n_samples, target_range = [0,1000]): - self._sample = True - self._n_samples = n_samples - self._target_range = target_range - labels, nllh = self(data) - self._sample = False - return labels, nllh From 9b90af40a9726714b2c0b14a171bb01b504e1275 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 28 May 2024 21:47:12 +0200 Subject: [PATCH 09/32] black --- src/graphnet/models/utils.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/graphnet/models/utils.py b/src/graphnet/models/utils.py index 73a4f56f3..11b73d06f 100644 --- a/src/graphnet/models/utils.py +++ b/src/graphnet/models/utils.py @@ -1,6 +1,6 @@ """Utility functions for `graphnet.models`.""" -from typing import List, Tuple, Any +from typing import List, Tuple, Any, Union from torch_geometric.nn import knn_graph from torch_geometric.data import Batch import torch @@ -105,10 +105,14 @@ def array_to_sequence( x[~mask] = padding_value return x, mask, seq_length -def get_fields(data: List[Data], fields: List[str]) -> Tensor: - labels = [] - if not isinstance(data, list): - data = [data] - for label in list(fields): - labels.append(torch.cat([d[label].reshape(-1,1) for d in data], dim=0)) - return torch.cat(labels, dim = 1) \ No newline at end of file + +def get_fields(data: Union[Data, List[Data]], fields: List[str]) -> Tensor: + """Extract named fields in Data object.""" + labels = [] + if not isinstance(data, list): + data = [data] + for label in list(fields): + labels.append( + torch.cat([d[label].reshape(-1, 1) for d in data], dim=0) + ) + return torch.cat(labels, dim=1) From 382651ffb2348b2757beda77ea4405a02d088555 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 28 May 2024 21:48:28 +0200 Subject: [PATCH 10/32] black --- src/graphnet/models/graphs/graph_definition.py | 11 ++++++----- src/graphnet/models/graphs/graphs.py | 10 +++++----- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/graphnet/models/graphs/graph_definition.py b/src/graphnet/models/graphs/graph_definition.py index 6c9a0a419..0338225b8 100644 --- a/src/graphnet/models/graphs/graph_definition.py +++ b/src/graphnet/models/graphs/graph_definition.py @@ -34,7 +34,7 @@ def __init__( sensor_mask: Optional[List[int]] = None, string_mask: Optional[List[int]] = None, sort_by: str = None, - repeat_labels: bool =False, + repeat_labels: bool = False, ): """Construct ´GraphDefinition´. The ´detector´ holds. @@ -63,9 +63,9 @@ def __init__( add_inactive_sensors: If True, inactive sensors will be appended to the graph with padded pulse information. Defaults to False. sensor_mask: A list of sensor id's to be masked from the graph. Any - sensor listed here will be removed from the graph. + sensor listed here will be removed from the graph. Defaults to None. - string_mask: A list of string id's to be masked from the graph. + string_mask: A list of string id's to be masked from the graph. Defaults to None. sort_by: Name of node feature to sort by. Defaults to None. repeat_labels: If True, labels will be repeated to match the @@ -415,12 +415,13 @@ def _add_truth( """ # Write attributes, either target labels, truth info or original # features. 
+ for truth_dict in truth_dicts: for key, value in truth_dict.items(): try: label = torch.tensor(value) if self._repeat_labels: - label = label.repeat(graph.x.shape[0],1) + label = label.repeat(graph.x.shape[0], 1) graph[key] = label except TypeError: # Cannot convert `value` to Tensor due to its data type, @@ -460,6 +461,6 @@ def _add_custom_labels( for key, fn in custom_label_functions.items(): label = fn(graph) if self._repeat_labels: - label = label.repeat(graph.x.shape[0],1) + label = label.repeat(graph.x.shape[0], 1) graph[key] = label return graph diff --git a/src/graphnet/models/graphs/graphs.py b/src/graphnet/models/graphs/graphs.py index 6e2ac086d..525675ca7 100644 --- a/src/graphnet/models/graphs/graphs.py +++ b/src/graphnet/models/graphs/graphs.py @@ -1,6 +1,6 @@ """A module containing different graph representations in GraphNeT.""" -from typing import List, Optional, Dict, Union +from typing import List, Optional, Dict, Union, Any import torch from numpy.random import Generator @@ -23,7 +23,7 @@ def __init__( seed: Optional[Union[int, Generator]] = None, nb_nearest_neighbours: int = 8, columns: List[int] = [0, 1, 2], - **kwargs + **kwargs: Any, ) -> None: """Construct k-nn graph representation. @@ -54,7 +54,7 @@ def __init__( input_feature_names=input_feature_names, perturbation_dict=perturbation_dict, seed=seed, - **kwargs + **kwargs, ) @@ -72,7 +72,7 @@ def __init__( dtype: Optional[torch.dtype] = torch.float, perturbation_dict: Optional[Dict[str, float]] = None, seed: Optional[Union[int, Generator]] = None, - **kwargs + **kwargs: Any, ) -> None: """Construct isolated nodes graph representation. @@ -97,5 +97,5 @@ def __init__( input_feature_names=input_feature_names, perturbation_dict=perturbation_dict, seed=seed, - **kwargs + **kwargs, ) From e299aac4db008c066d35d1a6673e47a2c85c20f4 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 09:02:19 +0200 Subject: [PATCH 11/32] polish dtype assignment --- src/graphnet/models/normalizing_flow.py | 31 +++++++++++++------------ src/graphnet/models/task/task.py | 7 +++--- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/src/graphnet/models/normalizing_flow.py b/src/graphnet/models/normalizing_flow.py index f84caa881..2d351bd5a 100644 --- a/src/graphnet/models/normalizing_flow.py +++ b/src/graphnet/models/normalizing_flow.py @@ -69,30 +69,31 @@ def __init__( self.backbone = backbone self._condition_on = condition_on - def forward( - self, data: Union[Data, List[Data]] - ) -> List[Union[Tensor, Data]]: + def forward(self, data: Union[Data, List[Data]]) -> Tensor: """Forward pass, chaining model components.""" - if self.backbone is not None: - x = self._backbone(data) - elif self._condition_on is not None: - assert isinstance(self._condition_on, list) - x = get_fields(data=data, fields=self._condition_on) - return self._tasks[0](x, data) - - def _backbone( - self, data: Union[Data, List[Data]] - ) -> List[Union[Tensor, Data]]: - assert self.backbone is not None if isinstance(data, Data): data = [data] x_list = [] for d in data: - x = self.backbone(d) + if self.backbone is not None: + x = self._backbone(d) + elif self._condition_on is not None: + assert isinstance(self._condition_on, list) + x = get_fields(data=d, fields=self._condition_on) + else: + # Unconditional flow + x = None + x = self._tasks[0](x, d) x_list.append(x) x = torch.cat(x_list, dim=0) return x + def _backbone( + self, data: Union[Data, List[Data]] + ) -> List[Union[Tensor, Data]]: + assert self.backbone is not None + return 
self.backbone(data) + def shared_step(self, batch: List[Data], batch_idx: int) -> Tensor: """Perform shared step. diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py index bb1842191..99514b11d 100644 --- a/src/graphnet/models/task/task.py +++ b/src/graphnet/models/task/task.py @@ -423,7 +423,7 @@ def nb_inputs(self) -> Union[int, None]: # type: ignore """Return number of conditional inputs assumed by task.""" return self._hidden_size - def _forward(self, x: Tensor, y: Tensor) -> Tensor: # type: ignore + def _forward(self, x: Optional[Tensor], y: Tensor) -> Tensor: # type: ignore if x is not None: if x.shape[0] != y.shape[0]: raise AssertionError( @@ -443,9 +443,10 @@ def forward( ) -> Union[Tensor, Data]: """Forward pass.""" # Manually cast pdf to correct dtype - is there a better way? - self._flow = self._flow.to(x.dtype) + self._flow = self._flow.to(self.dtype) # Get target values - labels = get_fields(data=data, fields=self._target_labels).to(x.dtype) + labels = get_fields(data=data, fields=self._target_labels) + labels = labels.to(self.dtype) # Set the initial parameters of flow close to truth # This speeds up training and helps with NaN if self._initialized is False: From 9c0ad64980f5034cb2d9fa3ff857502b43c90ca8 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 09:24:36 +0200 Subject: [PATCH 12/32] add warning --- src/graphnet/models/normalizing_flow.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/graphnet/models/normalizing_flow.py b/src/graphnet/models/normalizing_flow.py index 2d351bd5a..a52a54e66 100644 --- a/src/graphnet/models/normalizing_flow.py +++ b/src/graphnet/models/normalizing_flow.py @@ -36,6 +36,16 @@ def __init__( scheduler_config: Optional[Dict] = None, ) -> None: """Construct `NormalizingFlow`.""" + # Checks + if (backbone is not None) & (condition_on is not None): + # If user wants to condition on both + raise ValueError( + f"{self.__class__.__name__} got values for both " + "`backbone` and `condition_on`, but can only " + "condition on one of those. Please specify just " + "one of these arguments."
+ ) + # Handle args if backbone is not None: assert isinstance(backbone, GNN) From f53bc1dc0c0f2b97d31f72827ed6a2a756478df5 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 09:46:48 +0200 Subject: [PATCH 13/32] add check for flow package --- src/graphnet/models/__init__.py | 7 ++++--- src/graphnet/models/task/task.py | 6 ++++-- src/graphnet/utilities/imports.py | 14 ++++++++++++++ 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/src/graphnet/models/__init__.py b/src/graphnet/models/__init__.py index a7e0a064b..12d4cbcc5 100644 --- a/src/graphnet/models/__init__.py +++ b/src/graphnet/models/__init__.py @@ -6,9 +6,10 @@ existing, purpose-built components and chain them together to form a complete GNN """ - - +from graphnet.utilities.imports import has_jammy_flows_package from .model import Model from .standard_model import StandardModel from .standard_averaged_model import StandardAveragedModel -from .normalizing_flow import NormalizingFlow \ No newline at end of file + +if has_jammy_flows_package(): + from .normalizing_flow import NormalizingFlow diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py index 99514b11d..441484838 100644 --- a/src/graphnet/models/task/task.py +++ b/src/graphnet/models/task/task.py @@ -10,8 +10,6 @@ from torch import Tensor from torch.nn import Linear from torch_geometric.data import Data -import jammy_flows -from torch.distributions.uniform import Uniform if TYPE_CHECKING: # Avoid cyclic dependency @@ -20,6 +18,10 @@ from graphnet.models import Model from graphnet.utilities.decorators import final from graphnet.models.utils import get_fields +from graphnet.utilities.imports import has_jammy_flows_package + +if has_jammy_flows_package(): + import jammy_flows class Task(Model): diff --git a/src/graphnet/utilities/imports.py b/src/graphnet/utilities/imports.py index a490f413c..ae59d3b98 100644 --- a/src/graphnet/utilities/imports.py +++ b/src/graphnet/utilities/imports.py @@ -33,6 +33,20 @@ def has_torch_package() -> bool: return False +def has_jammy_flows_package() -> bool: + """Check if the `jammy_flows` package is available.""" + try: + import jammmy_flows # pyright: reportMissingImports=false + + return True + except ImportError: + Logger(log_folder=None).warning_once( + "`jammy_flows` not available. Normalizing Flow functionality is " + "missing." + ) + return False + + def requires_icecube(test_function: Callable) -> Callable: """Decorate `test_function` for use only if `icecube` module is present.""" From a0afcc3e547b6910190196b649cd541d15447832 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 09:57:24 +0200 Subject: [PATCH 14/32] expand docstrings --- src/graphnet/models/normalizing_flow.py | 10 +++++----- src/graphnet/models/task/task.py | 17 +++++++++++------ 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/graphnet/models/normalizing_flow.py b/src/graphnet/models/normalizing_flow.py index a52a54e66..e0f11be3e 100644 --- a/src/graphnet/models/normalizing_flow.py +++ b/src/graphnet/models/normalizing_flow.py @@ -14,12 +14,12 @@ class NormalizingFlow(EasySyntax): - """A Standard way of combining model components in GraphNeT. + """A model for building (conditional) normalizing flows in GraphNeT. - This model is compatible with the vast majority of supervised learning - tasks such as regression, binary and multi-label classification. - - Capable of producing both event-level and pulse-level predictions. 
+ This model relies on `jammy_flows` for building and evaluating + normalizing flows. See + https://thoglu.github.io/jammy_flows/usage/introduction.html + for details. """ diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py index 99514b11d..45f6f8a15 100644 --- a/src/graphnet/models/task/task.py +++ b/src/graphnet/models/task/task.py @@ -386,7 +386,11 @@ def _forward(self, x: Union[Tensor, Data]) -> Tensor: # type: ignore class StandardFlowTask(Task): - """A `Task` for `NormalizingFlow`s in GraphNeT.""" + """A `Task` for `NormalizingFlow`s in GraphNeT. + + This Task requires the support package `jammy_flows` for constructing and + evaluating normalizing flows. + """ def __init__( self, @@ -394,14 +398,15 @@ def __init__( flow_layers: str = "gggt", **task_kwargs: Any, ): - """Construct `StandardLearnedTask`. + """Construct `StandardFlowTask`. Args: target_labels: A list of names for the targets of this Task. - flow_layers: A string indicating the flow layer types. - hidden_size: The number of columns in the output of - the last latent layer of `Model` using this Task. - Available through `Model.nb_outputs` + flow_layers: A string indicating the flow layer types. See + https://thoglu.github.io/jammy_flows/usage/introduction.html + for details. + hidden_size: The number of columns on which the normalizing flow + is conditioned. May be `None`, indicating non-conditional flow. From 845293d029b677d8c0998b4aa6f3e2979b7cf29a Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 10:04:43 +0200 Subject: [PATCH 15/32] update workflow to install jammy_flows --- .github/actions/install/action.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/actions/install/action.yml b/.github/actions/install/action.yml index b2d6d2896..19e23be01 100644 --- a/.github/actions/install/action.yml +++ b/.github/actions/install/action.yml @@ -38,4 +38,5 @@ runs: run: | echo requirements/torch_${{ inputs.hardware }}.txt ${{ env.PIP_FLAGS }} .${{ inputs.extras }} pip install -r requirements/torch_${{ inputs.hardware }}.txt ${{ env.PIP_FLAGS }} .${{ inputs.extras }} + pip install git+https://github.com/thoglu/jammy_flows.git shell: bash From a71765cc30c919c74fa937cd748db76e618ac4bd Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 10:30:53 +0200 Subject: [PATCH 16/32] add example --- .../04_training/07_train_normalizing_flow.py | 225 ++++++++++++++++++ 1 file changed, 225 insertions(+) create mode 100644 examples/04_training/07_train_normalizing_flow.py diff --git a/examples/04_training/07_train_normalizing_flow.py b/examples/04_training/07_train_normalizing_flow.py new file mode 100644 index 000000000..e6de97920 --- /dev/null +++ b/examples/04_training/07_train_normalizing_flow.py @@ -0,0 +1,225 @@ +"""Example of training a conditional NormalizingFlow.""" + +import os +from typing import Any, Dict, List, Optional + +from pytorch_lightning.loggers import WandbLogger +import torch +from torch.optim.adam import Adam + +from graphnet.constants import EXAMPLE_DATA_DIR, EXAMPLE_OUTPUT_DIR +from graphnet.data.constants import FEATURES, TRUTH +from graphnet.models import NormalizingFlow +from graphnet.models.detector.prometheus import Prometheus +from graphnet.models.gnn import DynEdge +from graphnet.models.graphs import KNNGraph +from graphnet.models.task.task import StandardFlowTask +from graphnet.training.callbacks import PiecewiseLinearLR +from graphnet.training.utils import make_train_validation_dataloader +from
graphnet.utilities.argparse import ArgumentParser +from graphnet.utilities.logging import Logger + +# Constants +features = FEATURES.PROMETHEUS +truth = TRUTH.PROMETHEUS + + +def main( + path: str, + pulsemap: str, + target: str, + truth_table: str, + gpus: Optional[List[int]], + max_epochs: int, + early_stopping_patience: int, + batch_size: int, + num_workers: int, + wandb: bool = False, +) -> None: + """Run example.""" + # Construct Logger + logger = Logger() + + # Initialise Weights & Biases (W&B) run + if wandb: + # Make sure W&B output directory exists + wandb_dir = "./wandb/" + os.makedirs(wandb_dir, exist_ok=True) + wandb_logger = WandbLogger( + project="example-script", + entity="graphnet-team", + save_dir=wandb_dir, + log_model=True, + ) + + logger.info(f"features: {features}") + logger.info(f"truth: {truth}") + + # Configuration + config: Dict[str, Any] = { + "path": path, + "pulsemap": pulsemap, + "batch_size": batch_size, + "num_workers": num_workers, + "target": target, + "early_stopping_patience": early_stopping_patience, + "fit": { + "gpus": gpus, + "max_epochs": max_epochs, + }, + } + + archive = os.path.join(EXAMPLE_OUTPUT_DIR, "train_model_without_configs") + run_name = "dynedge_{}_example".format(config["target"]) + if wandb: + # Log configuration to W&B + wandb_logger.experiment.config.update(config) + + # Define graph representation + graph_definition = KNNGraph(detector=Prometheus()) + + ( + training_dataloader, + validation_dataloader, + ) = make_train_validation_dataloader( + db=config["path"], + graph_definition=graph_definition, + pulsemaps=config["pulsemap"], + features=features, + truth=truth, + batch_size=config["batch_size"], + num_workers=config["num_workers"], + truth_table=truth_table, + selection=None, + ) + + # Building model + + backbone = DynEdge( + nb_inputs=graph_definition.nb_outputs, + global_pooling_schemes=["min", "max", "mean", "sum"], + ) + + model = NormalizingFlow( + graph_definition=graph_definition, + backbone=backbone, + optimizer_class=Adam, + target_labels=config["target"], + optimizer_kwargs={"lr": 1e-03, "eps": 1e-03}, + scheduler_class=PiecewiseLinearLR, + scheduler_kwargs={ + "milestones": [ + 0, + len(training_dataloader) / 2, + len(training_dataloader) * config["fit"]["max_epochs"], + ], + "factors": [1e-2, 1, 1e-02], + }, + scheduler_config={ + "interval": "step", + }, + ) + + # Training model + model.fit( + training_dataloader, + validation_dataloader, + early_stopping_patience=config["early_stopping_patience"], + logger=wandb_logger if wandb else None, + **config["fit"], + ) + + # Get predictions + additional_attributes = model.target_labels + assert isinstance(additional_attributes, list) # mypy + + results = model.predict_as_dataframe( + validation_dataloader, + additional_attributes=additional_attributes + ["event_no"], + gpus=config["fit"]["gpus"], + ) + + # Save predictions and model to file + db_name = path.split("/")[-1].split(".")[0] + path = os.path.join(archive, db_name, run_name) + logger.info(f"Writing results to {path}") + os.makedirs(path, exist_ok=True) + + # Save results as .csv + results.to_csv(f"{path}/results.csv") + + # Save full model (including weights) to .pth file - not version safe + # Note: Models saved as .pth files in one version of graphnet + # may not be compatible with a different version of graphnet. + model.save(f"{path}/model.pth") + + # Save model config and state dict - Version safe save method. + # This method of saving models is the safest way. 
+ model.save_state_dict(f"{path}/state_dict.pth") + model.save_config(f"{path}/model_config.yml") + + +if __name__ == "__main__": + + # Parse command-line arguments + parser = ArgumentParser( + description=""" +Train conditional NormalizingFlow without the use of config files. +""" + ) + + parser.add_argument( + "--path", + help="Path to dataset file (default: %(default)s)", + default=f"{EXAMPLE_DATA_DIR}/sqlite/prometheus/prometheus-events.db", + ) + + parser.add_argument( + "--pulsemap", + help="Name of pulsemap to use (default: %(default)s)", + default="total", + ) + + parser.add_argument( + "--target", + help=( + "Name of feature to use as regression target (default: " + "%(default)s)" + ), + default="total_energy", + ) + + parser.add_argument( + "--truth-table", + help="Name of truth table to be used (default: %(default)s)", + default="mc_truth", + ) + + parser.with_standard_arguments( + "gpus", + ("max-epochs", 1), + "early-stopping-patience", + ("batch-size", 16), + "num-workers", + ) + + parser.add_argument( + "--wandb", + action="store_true", + help="If True, Weights & Biases are used to track the experiment.", + ) + + args, unknown = parser.parse_known_args() + + main( + args.path, + args.pulsemap, + args.target, + args.truth_table, + args.gpus, + args.max_epochs, + args.early_stopping_patience, + args.batch_size, + args.num_workers, + args.wandb, + ) From eb159328d789eac623cead20e5eef5be6be36de3 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 10:33:15 +0200 Subject: [PATCH 17/32] check in example --- examples/04_training/07_train_normalizing_flow.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/examples/04_training/07_train_normalizing_flow.py b/examples/04_training/07_train_normalizing_flow.py index e6de97920..187e30dfc 100644 --- a/examples/04_training/07_train_normalizing_flow.py +++ b/examples/04_training/07_train_normalizing_flow.py @@ -13,11 +13,21 @@ from graphnet.models.detector.prometheus import Prometheus from graphnet.models.gnn import DynEdge from graphnet.models.graphs import KNNGraph -from graphnet.models.task.task import StandardFlowTask from graphnet.training.callbacks import PiecewiseLinearLR from graphnet.training.utils import make_train_validation_dataloader from graphnet.utilities.argparse import ArgumentParser from graphnet.utilities.logging import Logger +from graphnet.utilities.imports import has_jammy_flows_package + +# Make sure that jammy_flows is installed +try: + assert has_jammy_flows_package +except AssertionError: + raise AssertionError( + "This example requires the package `jammy_flows` " + "to be installed. It appears that the package is " + "not installed. Please install the package."
+ ) From 4150f144de8f145f1adfbb2cc61a24ac3617a845 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 11:36:08 +0200 Subject: [PATCH 18/32] update example --- examples/04_training/07_train_normalizing_flow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/04_training/07_train_normalizing_flow.py b/examples/04_training/07_train_normalizing_flow.py index 187e30dfc..c94c2821f 100644 --- a/examples/04_training/07_train_normalizing_flow.py +++ b/examples/04_training/07_train_normalizing_flow.py @@ -21,7 +21,7 @@ # Make sure that jammy_flows is installed try: - assert has_jammy_flows_package + assert has_jammy_flows_package() except AssertionError: raise AssertionError( "This example requires the package `jammy_flows` " From 8116c29ae4cffe463f714c8cc5719ea7e4c43cc1 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 11:53:15 +0200 Subject: [PATCH 19/32] update example --- examples/04_training/07_train_normalizing_flow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/04_training/07_train_normalizing_flow.py b/examples/04_training/07_train_normalizing_flow.py index c94c2821f..1de4b349b 100644 --- a/examples/04_training/07_train_normalizing_flow.py +++ b/examples/04_training/07_train_normalizing_flow.py @@ -9,7 +9,6 @@ from graphnet.constants import EXAMPLE_DATA_DIR, EXAMPLE_OUTPUT_DIR from graphnet.data.constants import FEATURES, TRUTH -from graphnet.models import NormalizingFlow from graphnet.models.detector.prometheus import Prometheus from graphnet.models.gnn import DynEdge from graphnet.models.graphs import KNNGraph @@ -21,6 +21,7 @@ # Make sure that jammy_flows is installed try: assert has_jammy_flows_package() + from graphnet.models import NormalizingFlow except AssertionError: raise AssertionError( "This example requires the package `jammy_flows` " From d51f02c108a2994e1d5db40c62f93791279c7f11 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 12:00:41 +0200 Subject: [PATCH 20/32] actions --- .github/actions/install/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/install/action.yml b/.github/actions/install/action.yml index 19e23be01..2941789d4 100644 --- a/.github/actions/install/action.yml +++ b/.github/actions/install/action.yml @@ -38,5 +38,5 @@ runs: run: | echo requirements/torch_${{ inputs.hardware }}.txt ${{ env.PIP_FLAGS }} .${{ inputs.extras }} pip install -r requirements/torch_${{ inputs.hardware }}.txt ${{ env.PIP_FLAGS }} .${{ inputs.extras }} - pip install git+https://github.com/thoglu/jammy_flows.git + pip install git+https://github.com/thoglu/jammy_flows.git ${{ env.PIP_FLAGS }} shell: bash From 210ef2847498a80a1153f919a239af0bac01dad0 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 12:02:07 +0200 Subject: [PATCH 21/32] update icetray action --- .github/workflows/build.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8f2762e77..a17bf4b8f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -63,6 +63,14 @@ jobs: uses: ./.github/actions/install with: editable: true + - name: Print packages in pip + run: | + pip show torch + pip show torch-geometric + pip show torch-cluster + pip show torch-sparse + pip show torch-scatter + pip show jammy_flows - name: Run unit tests and generate coverage report run: | coverage run --source=graphnet -m pytest tests/
--ignore=tests/examples/04_training --ignore=tests/utilities @@ -109,6 +117,7 @@ jobs: pip show torch-cluster pip show torch-sparse pip show torch-scatter + pip show jammy_flows - name: Run unit tests and generate coverage report run: | set -o pipefail # To propagate exit code from pytest From c32ffd107fcfe8bfaf2b1251d7cacd358944181a Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 12:07:49 +0200 Subject: [PATCH 22/32] update install action --- .github/actions/install/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/install/action.yml b/.github/actions/install/action.yml index 2941789d4..19e23be01 100644 --- a/.github/actions/install/action.yml +++ b/.github/actions/install/action.yml @@ -38,5 +38,5 @@ runs: run: | echo requirements/torch_${{ inputs.hardware }}.txt ${{ env.PIP_FLAGS }} .${{ inputs.extras }} pip install -r requirements/torch_${{ inputs.hardware }}.txt ${{ env.PIP_FLAGS }} .${{ inputs.extras }} - pip install git+https://github.com/thoglu/jammy_flows.git ${{ env.PIP_FLAGS }} + pip install git+https://github.com/thoglu/jammy_flows.git shell: bash From 59870dd5e4eb1525291c713f7fe97d423733a354 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 12:17:02 +0200 Subject: [PATCH 23/32] fix `has_jammy_flows_package` --- src/graphnet/utilities/imports.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/graphnet/utilities/imports.py b/src/graphnet/utilities/imports.py index ae59d3b98..1c143280a 100644 --- a/src/graphnet/utilities/imports.py +++ b/src/graphnet/utilities/imports.py @@ -36,7 +36,7 @@ def has_torch_package() -> bool: def has_jammy_flows_package() -> bool: """Check if the `jammy_flows` package is available.""" try: - import jammmy_flows # pyright: reportMissingImports=false + import jammy_flows # pyright: reportMissingImports=false return True except ImportError: From b953ff4dece1dac067190683a79cbda0a5b3cf5a Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 17:16:12 +0200 Subject: [PATCH 24/32] polish --- src/graphnet/models/normalizing_flow.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/graphnet/models/normalizing_flow.py b/src/graphnet/models/normalizing_flow.py index e0f11be3e..3f61ea294 100644 --- a/src/graphnet/models/normalizing_flow.py +++ b/src/graphnet/models/normalizing_flow.py @@ -96,7 +96,7 @@ def forward(self, data: Union[Data, List[Data]]) -> Tensor: x = self._tasks[0](x, d) x_list.append(x) x = torch.cat(x_list, dim=0) - return x + return [x] def _backbone( self, data: Union[Data, List[Data]] @@ -111,6 +111,9 @@ def shared_step(self, batch: List[Data], batch_idx: int) -> Tensor: between the training and validation step. 
""" loss = self(batch) + if isinstance(loss, list): + assert len(loss) == 1 + loss = loss[0] return torch.mean(loss, dim=0) def validate_tasks(self) -> None: From 5ab298a8579fa99fc019753866cda9810177a880 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 17:30:04 +0200 Subject: [PATCH 25/32] add doc string --- src/graphnet/models/normalizing_flow.py | 32 ++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/src/graphnet/models/normalizing_flow.py b/src/graphnet/models/normalizing_flow.py index 3f61ea294..528001ac8 100644 --- a/src/graphnet/models/normalizing_flow.py +++ b/src/graphnet/models/normalizing_flow.py @@ -35,7 +35,37 @@ def __init__( scheduler_kwargs: Optional[Dict] = None, scheduler_config: Optional[Dict] = None, ) -> None: - """Construct `NormalizingFlow`.""" + """Build NormalizingFlow to learn (conditional) normalizing flows. + + NormalizingFlow is able to build, train and evaluate a wide suite of + normalizing flows. Instead of optimizing a loss function, flows + minimize a learned pdf of your data, providing you with a posterior + distribution for every example instead of point-like predictions. + + `NormalizingFlow` can be conditioned on existing fields in the + DataRepresentation or latent representations from `Models`. + + Args: + graph_definition: The `GraphDefinition` to train the model on. + target_labels: Name of target(s) to learn the pdf of. + backbone: Architecture used to produce latent representations of + the input data on which the pdf will be conditioned. + Defaults to None. + condition_on: List of fields in Data objects to condition the + pdf on. Defaults to None. + flow_layers: A string defining the flow layers. + See https://thoglu.github.io/jammy_flows/usage/introduction.html + for details. Defaults to "gggt". + optimizer_class: Optimizer to use. Defaults to Adam. + optimizer_kwargs: Optimzier arguments. Defaults to None. + scheduler_class: Learning rate scheduler to use. Defaults to None. + scheduler_kwargs: Arguments to learning rate scheduler. + Defaults to None. + scheduler_config: Defaults to None. + + Raises: + ValueError: if both `backbone` and `condition_on` is specified. + """ # Checks if (backbone is not None) & (condition_on is not None): # If user wants to condition on both From 3bc33a3718106bfcba66896bf9c5f79dc708fc84 Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Wed, 29 May 2024 17:53:43 +0200 Subject: [PATCH 26/32] update docstring --- src/graphnet/models/normalizing_flow.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/graphnet/models/normalizing_flow.py b/src/graphnet/models/normalizing_flow.py index 528001ac8..59e6e3961 100644 --- a/src/graphnet/models/normalizing_flow.py +++ b/src/graphnet/models/normalizing_flow.py @@ -45,6 +45,9 @@ def __init__( `NormalizingFlow` can be conditioned on existing fields in the DataRepresentation or latent representations from `Models`. + NormalizingFlow is built upon https://github.com/thoglu/jammy_flows, + and we refer to their documentation for details on the flows. + Args: graph_definition: The `GraphDefinition` to train the model on. target_labels: Name of target(s) to learn the pdf of. 
From b74d9b2658c7889eeeed3b443686551047f44690 Mon Sep 17 00:00:00 2001
From: RasmusOrsoe
Date: Wed, 29 May 2024 17:56:44 +0200
Subject: [PATCH 27/32] update installation instruction

---
 docs/source/installation/quick-start.html | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/source/installation/quick-start.html b/docs/source/installation/quick-start.html
index aff34659e..e80fd5b8d 100644
--- a/docs/source/installation/quick-start.html
+++ b/docs/source/installation/quick-start.html
@@ -107,20 +107,20 @@
     }
 
     if (os == "linux" && cuda != "cpu" && torch != "no_torch"){
-        $("#command pre").text(`git clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_${$("#command").attr("cuda")}.txt -e .[torch,develop]`);
+        $("#command pre").text(`git clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_${$("#command").attr("cuda")}.txt -e .[torch,develop]\n\n# Optionally, install jammy_flows for normalizing flow support:\npip install git+https://github.com/thoglu/jammy_flows.git`);
     }
     else if (os == "linux" && cuda == "cpu" && torch != "no_torch"){
-        $("#command pre").text(`git clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_${$("#command").attr("cuda")}.txt -e .[torch,develop]`);
+        $("#command pre").text(`git clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_${$("#command").attr("cuda")}.txt -e .[torch,develop]\n\n# Optionally, install jammy_flows for normalizing flow support:\npip install git+https://github.com/thoglu/jammy_flows.git`);
     }
     else if (os == "linux" && cuda == "cpu" && torch == "no_torch"){
-        $("#command pre").text(`# Installations without PyTorch are intended for file conversion only\ngit clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_${$("#command").attr("cuda")}.txt -e .[develop]`);
+        $("#command pre").text(`# Installations without PyTorch are intended for file conversion only\ngit clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_${$("#command").attr("cuda")}.txt -e .[develop]\n\n# Optionally, install jammy_flows for normalizing flow support:\npip install git+https://github.com/thoglu/jammy_flows.git`);
     }
 
     if (os == "macos" && cuda == "cpu" && torch != "no_torch"){
-        $("#command pre").text(`git clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_macos.txt -e .[torch,develop]`);
+        $("#command pre").text(`git clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_macos.txt -e .[torch,develop]\n\n# Optionally, install jammy_flows for normalizing flow support:\npip install git+https://github.com/thoglu/jammy_flows.git`);
     }
 
     if (os == "macos" && cuda == "cpu" && torch == "no_torch"){
-        $("#command pre").text(`# Installations without PyTorch are intended for file conversion only\ngit clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_macos.txt -e .[develop]`);
+        $("#command pre").text(`# Installations without PyTorch are intended for file conversion only\ngit clone https://github.com/graphnet-team/graphnet.git\ncd graphnet\n\npip install -r requirements/torch_macos.txt -e .[develop]\n\n# Optionally, install jammy_flows for normalizing flow support:\npip install git+https://github.com/thoglu/jammy_flows.git`);
     }
 }

From 49576cb323aeeb240fef931f5c3d488060818569 Mon Sep 17 00:00:00 2001
From: RasmusOrsoe
Date: Sat, 13 Jul 2024 10:09:03 +0200
Subject: [PATCH 28/32] add normalization

---
 src/graphnet/models/normalizing_flow.py | 2 ++
 src/graphnet/models/task/task.py        | 5 +++++
 2 files changed, 7 insertions(+)

diff --git a/src/graphnet/models/normalizing_flow.py b/src/graphnet/models/normalizing_flow.py
index 59e6e3961..d62cf7c42 100644
--- a/src/graphnet/models/normalizing_flow.py
+++ b/src/graphnet/models/normalizing_flow.py
@@ -111,6 +111,7 @@ def __init__(
         self._graph_definition = graph_definition
         self.backbone = backbone
         self._condition_on = condition_on
+        self._norm = torch.nn.BatchNorm1d(hidden_size)
 
     def forward(self, data: Union[Data, List[Data]]) -> Tensor:
         """Forward pass, chaining model components."""
@@ -120,6 +121,7 @@ def forward(self, data: Union[Data, List[Data]]) -> Tensor:
         for d in data:
             if self.backbone is not None:
                 x = self._backbone(d)
+                x = self._norm(x)
             elif self._condition_on is not None:
                 assert isinstance(self._condition_on, list)
                 x = get_fields(data=d, fields=self._condition_on)
diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py
index 45f6f8a15..a1c3c52ed 100644
--- a/src/graphnet/models/task/task.py
+++ b/src/graphnet/models/task/task.py
@@ -396,6 +396,7 @@ def __init__(
         self,
         hidden_size: Union[int, None],
         flow_layers: str = "gggt",
+        target_norm: float = 1000.0,
         **task_kwargs: Any,
     ):
         """Construct `StandardFlowTask`.
@@ -405,6 +406,8 @@ def __init__(
             flow_layers: A string indicating the flow layer types.
                 See https://thoglu.github.io/jammy_flows/usage/introduction.html
                 for details.
+            target_norm: A normalization constant used to divide the target
+                values. The value is applied to all targets. Defaults to 1000.
             hidden_size: The number of columns on which the normalizing flow
                 is conditioned. May be `None`, indicating a non-conditional flow.
""" @@ -420,6 +423,7 @@ def __init__( conditional_input_dim=hidden_size, ) self._initialized = False + self._norm = target_norm @property def default_prediction_labels(self) -> List[str]: @@ -431,6 +435,7 @@ def nb_inputs(self) -> Union[int, None]: # type: ignore return self._hidden_size def _forward(self, x: Optional[Tensor], y: Tensor) -> Tensor: # type: ignore + y = y / self._norm if x is not None: if x.shape[0] != y.shape[0]: raise AssertionError( From 53eefb46c1de0432b72d985a3a46545e3fb65c7f Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 6 Aug 2024 09:52:26 +0200 Subject: [PATCH 29/32] increase batch size to avoid single event batch --- examples/04_training/07_train_normalizing_flow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/04_training/07_train_normalizing_flow.py b/examples/04_training/07_train_normalizing_flow.py index 1de4b349b..baa3eec85 100644 --- a/examples/04_training/07_train_normalizing_flow.py +++ b/examples/04_training/07_train_normalizing_flow.py @@ -209,7 +209,7 @@ def main( "gpus", ("max-epochs", 1), "early-stopping-patience", - ("batch-size", 16), + ("batch-size", 50), "num-workers", ) From dfee76d90be66a02675f35016025b8dbb2130f4e Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Tue, 6 Aug 2024 09:53:48 +0200 Subject: [PATCH 30/32] revert change --- examples/04_training/07_train_normalizing_flow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/04_training/07_train_normalizing_flow.py b/examples/04_training/07_train_normalizing_flow.py index baa3eec85..1de4b349b 100644 --- a/examples/04_training/07_train_normalizing_flow.py +++ b/examples/04_training/07_train_normalizing_flow.py @@ -209,7 +209,7 @@ def main( "gpus", ("max-epochs", 1), "early-stopping-patience", - ("batch-size", 50), + ("batch-size", 16), "num-workers", ) From dd416599a417d035477b5d0c8a8759d3b0f813f8 Mon Sep 17 00:00:00 2001 From: Rasmus Oersoe Date: Tue, 6 Aug 2024 09:55:34 +0200 Subject: [PATCH 31/32] increase batch size --- examples/04_training/07_train_normalizing_flow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/04_training/07_train_normalizing_flow.py b/examples/04_training/07_train_normalizing_flow.py index 1de4b349b..baa3eec85 100644 --- a/examples/04_training/07_train_normalizing_flow.py +++ b/examples/04_training/07_train_normalizing_flow.py @@ -209,7 +209,7 @@ def main( "gpus", ("max-epochs", 1), "early-stopping-patience", - ("batch-size", 16), + ("batch-size", 50), "num-workers", ) From 9aa693624065150901d2cc1d8b7d6e73e53e6a3f Mon Sep 17 00:00:00 2001 From: RasmusOrsoe Date: Mon, 16 Sep 2024 15:54:05 +0200 Subject: [PATCH 32/32] only initialize if training --- src/graphnet/models/task/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/graphnet/models/task/task.py b/src/graphnet/models/task/task.py index a1c3c52ed..0b9101107 100644 --- a/src/graphnet/models/task/task.py +++ b/src/graphnet/models/task/task.py @@ -461,7 +461,7 @@ def forward( labels = labels.to(self.dtype) # Set the initial parameters of flow close to truth # This speeds up training and helps with NaN - if self._initialized is False: + if (self._initialized is False) & (self.training): self._flow.init_params(data=deepcopy(labels).cpu()) self._flow.to(self.device) self._initialized = True # This is only done once