From 27c8525d008df1ceed07f7e12f14313833f1aed8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andreas=20S=C3=B8gaard?=
Date: Thu, 5 Oct 2023 01:10:48 +0000
Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20Aske-Ros?=
 =?UTF-8?q?ted/graphnet@211e0c1ee75405d81d1f36f7e2761984ab20c96f=20?=
 =?UTF-8?q?=F0=9F=9A=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 _modules/graphnet/data/dataloader.html        |  457 ------
 _modules/graphnet/data/dataset/dataset.html   | 1089 ------------
 .../data/dataset/parquet/parquet_dataset.html |  500 ------
 .../data/dataset/sqlite/sqlite_dataset.html   |  515 -------
 _modules/graphnet/data/pipeline.html          |  593 --------
 .../deployment/i3modules/graphnet_module.html |  817 ----------
 _modules/graphnet/models/coarsening.html      |  708 ---------
 .../graphnet/models/components/layers.html    |  579 -------
 _modules/graphnet/models/components/pool.html |  656 --------
 .../graphnet/models/detector/detector.html    |  419 -----
 .../graphnet/models/detector/icecube.html     |  528 -------
 .../graphnet/models/detector/prometheus.html  |  395 -----
 _modules/graphnet/models/gnn/convnet.html     |  484 ------
 _modules/graphnet/models/gnn/dynedge.html     |  691 ---------
 .../graphnet/models/gnn/dynedge_jinst.html    |  519 -------
 .../models/gnn/dynedge_kaggle_tito.html       |  616 --------
 _modules/graphnet/models/gnn/gnn.html         |  401 -----
 .../graphnet/models/graphs/edges/edges.html   |  559 -------
 .../models/graphs/graph_definition.html       |  690 ---------
 _modules/graphnet/models/graphs/graphs.html   |  419 -----
 .../graphnet/models/graphs/nodes/nodes.html   |  442 ------
 _modules/graphnet/models/model.html           |  732 ---------
 _modules/graphnet/models/standard_model.html  |  602 --------
 .../graphnet/models/task/classification.html  |  411 -----
 .../graphnet/models/task/reconstruction.html  |  609 --------
 _modules/graphnet/models/task/task.html       |  685 ---------
 _modules/graphnet/models/utils.html           |  430 ------
 _modules/graphnet/training/labels.html        |  436 ------
 .../graphnet/training/loss_functions.html     |  856 -----------
 _modules/graphnet/training/utils.html         |  656 --------
 _modules/index.html                           |   30 -
 api/graphnet.data.dataloader.html             |  127 +-
 api/graphnet.data.dataset.dataset.html        |  333 +---
 api/graphnet.data.dataset.html                |   14 +-
 api/graphnet.data.dataset.parquet.html        |   10 +-
 ....data.dataset.parquet.parquet_dataset.html |  119 +-
 api/graphnet.data.dataset.sqlite.html         |   10 +-
 ...et.data.dataset.sqlite.sqlite_dataset.html |  119 +-
 api/graphnet.data.html                        |   12 +-
 api/graphnet.data.pipeline.html               |   53 +-
 ...a.utilities.string_selection_resolver.html |    2 +-
 ....deployment.i3modules.graphnet_module.html |  133 +-
 api/graphnet.deployment.i3modules.html        |    7 +-
 api/graphnet.models.coarsening.html           |  233 +--
 api/graphnet.models.components.html           |   26 +-
 api/graphnet.models.components.layers.html    |  251 +--
 api/graphnet.models.components.pool.html      |  337 +----
 api/graphnet.models.detector.detector.html    |   96 +-
 api/graphnet.models.detector.html             |   23 +-
 api/graphnet.models.detector.icecube.html     |  231 +--
 api/graphnet.models.detector.prometheus.html  |   69 +-
 api/graphnet.models.gnn.convnet.html          |   79 +-
 api/graphnet.models.gnn.dynedge.html          |  101 +-
 api/graphnet.models.gnn.dynedge_jinst.html    |   76 +-
 ...aphnet.models.gnn.dynedge_kaggle_tito.html |   86 +-
 api/graphnet.models.gnn.gnn.html              |  104 +-
 api/graphnet.models.gnn.html                  |   30 +-
 api/graphnet.models.graphs.edges.edges.html   |  179 +--
 api/graphnet.models.graphs.edges.html         |   16 +-
 ...aphnet.models.graphs.graph_definition.html |  105 +-
 api/graphnet.models.graphs.graphs.html        |   59 +-
 api/graphnet.models.graphs.html               |   18 +-
 api/graphnet.models.graphs.nodes.html         |   14 +-
 api/graphnet.models.graphs.nodes.nodes.html   |  148 +-
 api/graphnet.models.html                      |   37 +-
 api/graphnet.models.model.html                |  303 +---
 api/graphnet.models.standard_model.html       |  343 +---
 api/graphnet.models.task.classification.html  |  271 +---
 api/graphnet.models.task.html                 |   34 +-
 api/graphnet.models.task.reconstruction.html  | 1348 +----------------
 api/graphnet.models.task.task.html            |  306 +---
 api/graphnet.models.utils.html                |  108 +-
 api/graphnet.training.html                    |   30 +-
 api/graphnet.training.labels.html             |   91 +-
 api/graphnet.training.loss_functions.html     |  542 +------
 api/graphnet.training.utils.html              |  189 +--
 genindex.html                                 |  972 +-----------
 objects.inv                                   |  Bin 6190 -> 4137 bytes
 py-modindex.html                              |  205 ---
 searchindex.js                                |    2 +-
 sitemap.xml                                   |    2 +-
 81 files changed, 178 insertions(+), 25349 deletions(-)
 delete mode 100644 _modules/graphnet/data/dataloader.html
 delete mode 100644 _modules/graphnet/data/dataset/dataset.html
 delete mode 100644 _modules/graphnet/data/dataset/parquet/parquet_dataset.html
 delete mode 100644 _modules/graphnet/data/dataset/sqlite/sqlite_dataset.html
 delete mode 100644 _modules/graphnet/data/pipeline.html
 delete mode 100644 _modules/graphnet/deployment/i3modules/graphnet_module.html
 delete mode 100644 _modules/graphnet/models/coarsening.html
 delete mode 100644 _modules/graphnet/models/components/layers.html
 delete mode 100644 _modules/graphnet/models/components/pool.html
 delete mode 100644 _modules/graphnet/models/detector/detector.html
 delete mode 100644 _modules/graphnet/models/detector/icecube.html
 delete mode 100644 _modules/graphnet/models/detector/prometheus.html
 delete mode 100644 _modules/graphnet/models/gnn/convnet.html
 delete mode 100644 _modules/graphnet/models/gnn/dynedge.html
 delete mode 100644 _modules/graphnet/models/gnn/dynedge_jinst.html
 delete mode 100644 _modules/graphnet/models/gnn/dynedge_kaggle_tito.html
 delete mode 100644 _modules/graphnet/models/gnn/gnn.html
 delete mode 100644 _modules/graphnet/models/graphs/edges/edges.html
 delete mode 100644 _modules/graphnet/models/graphs/graph_definition.html
 delete mode 100644 _modules/graphnet/models/graphs/graphs.html
 delete mode 100644 _modules/graphnet/models/graphs/nodes/nodes.html
 delete mode 100644 _modules/graphnet/models/model.html
 delete mode 100644 _modules/graphnet/models/standard_model.html
 delete mode 100644 _modules/graphnet/models/task/classification.html
 delete mode 100644 _modules/graphnet/models/task/reconstruction.html
 delete mode 100644 _modules/graphnet/models/task/task.html
 delete mode 100644 _modules/graphnet/models/utils.html
 delete mode 100644 _modules/graphnet/training/labels.html
 delete mode 100644 _modules/graphnet/training/loss_functions.html
 delete mode 100644 _modules/graphnet/training/utils.html

diff --git a/_modules/graphnet/data/dataloader.html b/_modules/graphnet/data/dataloader.html
deleted file mode 100644
index 4de320872..000000000
--- a/_modules/graphnet/data/dataloader.html
+++ /dev/null
@@ -1,457 +0,0 @@

Source code for graphnet.data.dataloader

-"""Base `Dataloader` class(es) used in `graphnet`."""
-
-from typing import Any, Callable, Dict, List, Union
-
-import torch.utils.data
-from torch_geometric.data import Batch, Data
-
-from graphnet.data.dataset import Dataset
-from graphnet.utilities.config import DatasetConfig
-
-
-
-
-def collate_fn(graphs: List[Data]) -> Batch:
-    """Remove graphs with less than two DOM hits.
-
-    Should not occur in "production".
-    """
-    graphs = [g for g in graphs if g.n_pulses > 1]
-    return Batch.from_data_list(graphs)
-
-
-def do_shuffle(selection_name: str) -> bool:
-    """Check whether to shuffle selection with name `selection_name`."""
-    return "train" in selection_name.lower()
-
-
-class DataLoader(torch.utils.data.DataLoader):
-    """Class for loading data from a `Dataset`."""
-
-    def __init__(
-        self,
-        dataset: Dataset,
-        batch_size: int = 1,
-        shuffle: bool = False,
-        num_workers: int = 10,
-        persistent_workers: bool = True,
-        collate_fn: Callable = collate_fn,
-        prefetch_factor: int = 2,
-        **kwargs: Any,
-    ) -> None:
-        """Construct `DataLoader`."""
-        # Base class constructor
-        super().__init__(
-            dataset,
-            batch_size=batch_size,
-            shuffle=shuffle,
-            num_workers=num_workers,
-            collate_fn=collate_fn,
-            persistent_workers=persistent_workers,
-            prefetch_factor=prefetch_factor,
-            **kwargs,
-        )
-
-    @classmethod
-    def from_dataset_config(
-        cls,
-        config: DatasetConfig,
-        **kwargs: Any,
-    ) -> Union["DataLoader", Dict[str, "DataLoader"]]:
-        """Construct `DataLoader`s based on selections in `DatasetConfig`."""
-        if isinstance(config.selection, dict):
-            assert "shuffle" not in kwargs, (
-                "When passing a `DatasetConfig` with multiple selections, "
-                "`shuffle` is automatically inferred from the selection name, "
-                "and thus should not be specified as an argument."
-            )
-            datasets = Dataset.from_config(config)
-            assert isinstance(datasets, dict)
-            data_loaders: Dict[str, DataLoader] = {}
-            for name, dataset in datasets.items():
-                data_loaders[name] = cls(
-                    dataset,
-                    shuffle=do_shuffle(name),
-                    **kwargs,
-                )
-
-            return data_loaders
-
-        else:
-            assert "shuffle" in kwargs, (
-                "When passing a `DatasetConfig` with a single selection, you "
-                "need to specify `shuffle` as an argument."
-            )
-            dataset = Dataset.from_config(config)
-            assert isinstance(dataset, Dataset)
-            return cls(dataset, **kwargs)
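
A minimal usage sketch for the class above, assuming a `DatasetConfig` whose `selection` is a dict containing a "train" entry; the config path and keyword values are illustrative, not part of this patch:

    from graphnet.utilities.config import DatasetConfig
    from graphnet.data.dataloader import DataLoader

    config = DatasetConfig.load("dataset_config.yml")  # hypothetical path

    # With a dict-valued `selection`, one loader is returned per named
    # selection, and `shuffle` is inferred from the selection name.
    loaders = DataLoader.from_dataset_config(config, batch_size=128, num_workers=4)
    train_loader = loaders["train"]  # shuffled, since "train" is in the name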
\ No newline at end of file
diff --git a/_modules/graphnet/data/dataset/dataset.html b/_modules/graphnet/data/dataset/dataset.html
deleted file mode 100644
index f82fb0dff..000000000
--- a/_modules/graphnet/data/dataset/dataset.html
+++ /dev/null
@@ -1,1089 +0,0 @@

Source code for graphnet.data.dataset.dataset

-"""Base :py:class:`Dataset` class(es) used in GraphNeT."""
-
-from copy import deepcopy
-from abc import ABC, abstractmethod
-from typing import (
-    cast,
-    Any,
-    Callable,
-    Dict,
-    List,
-    Optional,
-    Tuple,
-    Union,
-    Iterable,
-    Type,
-)
-
-import numpy as np
-import torch
-from torch_geometric.data import Data
-
-from graphnet.constants import GRAPHNET_ROOT_DIR
-from graphnet.data.utilities.string_selection_resolver import (
-    StringSelectionResolver,
-)
-from graphnet.training.labels import Label
-from graphnet.utilities.config import (
-    Configurable,
-    DatasetConfig,
-    DatasetConfigSaverABCMeta,
-)
-from graphnet.utilities.config.parsing import traverse_and_apply
-from graphnet.utilities.logging import Logger
-from graphnet.models.graphs import GraphDefinition
-
-from graphnet.utilities.config.parsing import (
-    get_all_grapnet_classes,
-)
-
-
-
-class ColumnMissingException(Exception):
-    """Exception to indicate a missing column in a dataset."""
-
-
-def load_module(class_name: str) -> Type:
-    """Load graphnet module from string name.
-
-    Args:
-        class_name: name of class
-
-    Returns:
-        graphnet module.
-    """
-    # Get a lookup for all classes in `graphnet`
-    import graphnet.data
-    import graphnet.models
-    import graphnet.training
-
-    namespace_classes = get_all_grapnet_classes(
-        graphnet.data, graphnet.models, graphnet.training
-    )
-    return namespace_classes[class_name]
-
-
-def parse_graph_definition(cfg: dict) -> GraphDefinition:
-    """Construct `GraphDefinition` from `DatasetConfig`."""
-    assert cfg["graph_definition"] is not None
-
-    args = cfg["graph_definition"]["arguments"]
-    classes = {}
-    for arg in args.keys():
-        if isinstance(args[arg], dict):
-            if "class_name" in args[arg].keys():
-                classes[arg] = load_module(args[arg]["class_name"])(
-                    **args[arg]["arguments"]
-                )
-        if arg == "dtype":
-            args[arg] = eval(args[arg])  # converts string to class
-
-    new_cfg = deepcopy(args)
-    new_cfg.update(classes)
-    graph_definition = load_module(cfg["graph_definition"]["class_name"])(
-        **new_cfg
-    )
-    return graph_definition
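
For illustration, `parse_graph_definition` expects a dict of roughly the following shape; the class names and argument values here are hypothetical examples, not taken from this patch:

    cfg = {
        "graph_definition": {
            "class_name": "KNNGraph",
            "arguments": {
                # Nested dicts carrying a "class_name" are instantiated first
                "detector": {"class_name": "IceCube86", "arguments": {}},
                # "dtype" is stored as a string and eval'ed back into a class
                "dtype": "torch.float32",
            },
        }
    }
    graph_definition = parse_graph_definition(cfg)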
-
-
-class Dataset(
-    Logger,
-    Configurable,
-    torch.utils.data.Dataset,
-    ABC,
-    metaclass=DatasetConfigSaverABCMeta,
-):
-    """Base Dataset class for reading from any intermediate file format."""
-
-    # Class method(s)
-    @classmethod
-    def from_config(  # type: ignore[override]
-        cls,
-        source: Union[DatasetConfig, str],
-    ) -> Union[
-        "Dataset",
-        "EnsembleDataset",
-        Dict[str, "Dataset"],
-        Dict[str, "EnsembleDataset"],
-    ]:
-        """Construct `Dataset` instance from `source` configuration."""
-        if isinstance(source, str):
-            source = DatasetConfig.load(source)
-
-        assert isinstance(source, DatasetConfig), (
-            f"Argument `source` of type ({type(source)}) is not a "
-            "`DatasetConfig`"
-        )
-
-        assert (
-            "graph_definition" in source.dict().keys()
-        ), "`DatasetConfig` incompatible with current GraphNeT version."
-
-        # Parse set of `selection`.
-        if isinstance(source.selection, dict):
-            return cls._construct_datasets_from_dict(source)
-        elif (
-            isinstance(source.selection, list)
-            and len(source.selection)
-            and isinstance(source.selection[0], str)
-        ):
-            return cls._construct_dataset_from_list_of_strings(source)
-
-        cfg = source.dict()
-        if cfg["graph_definition"] is not None:
-            cfg["graph_definition"] = parse_graph_definition(cfg)
-        return source._dataset_class(**cfg)
-
-    @classmethod
-    def concatenate(
-        cls,
-        datasets: List["Dataset"],
-    ) -> "EnsembleDataset":
-        """Concatenate multiple `Dataset`s into one instance."""
-        return EnsembleDataset(datasets)
-
-    @classmethod
-    def _construct_datasets_from_dict(
-        cls, config: DatasetConfig
-    ) -> Dict[str, "Dataset"]:
-        """Construct `Dataset` for each entry in dict `self.selection`."""
-        assert isinstance(config.selection, dict)
-        datasets: Dict[str, "Dataset"] = {}
-        selections: Dict[str, Union[str, List]] = deepcopy(config.selection)
-        for key, selection in selections.items():
-            config.selection = selection
-            dataset = Dataset.from_config(config)
-            assert isinstance(dataset, (Dataset, EnsembleDataset))
-            datasets[key] = dataset
-
-        # Reset `selections`.
-        config.selection = selections
-
-        return datasets
-
-    @classmethod
-    def _construct_dataset_from_list_of_strings(
-        cls, config: DatasetConfig
-    ) -> "Dataset":
-        """Construct `Dataset` for each entry in list `self.selection`."""
-        assert isinstance(config.selection, list)
-        datasets: List["Dataset"] = []
-        selections: List[str] = deepcopy(cast(List[str], config.selection))
-        for selection in selections:
-            config.selection = selection
-            dataset = Dataset.from_config(config)
-            assert isinstance(dataset, Dataset)
-            datasets.append(dataset)
-
-        # Reset `selections`.
-        config.selection = selections
-
-        return cls.concatenate(datasets)
-
-    @classmethod
-    def _resolve_graphnet_paths(
-        cls, path: Union[str, List[str]]
-    ) -> Union[str, List[str]]:
-        if isinstance(path, list):
-            return [cast(str, cls._resolve_graphnet_paths(p)) for p in path]
-
-        assert isinstance(path, str)
-        return (
-            path.replace("$graphnet", GRAPHNET_ROOT_DIR)
-            .replace("$GRAPHNET", GRAPHNET_ROOT_DIR)
-            .replace("${graphnet}", GRAPHNET_ROOT_DIR)
-            .replace("${GRAPHNET}", GRAPHNET_ROOT_DIR)
-        )
-
-    def __init__(
-        self,
-        path: Union[str, List[str]],
-        graph_definition: GraphDefinition,
-        pulsemaps: Union[str, List[str]],
-        features: List[str],
-        truth: List[str],
-        *,
-        node_truth: Optional[List[str]] = None,
-        index_column: str = "event_no",
-        truth_table: str = "truth",
-        node_truth_table: Optional[str] = None,
-        string_selection: Optional[List[int]] = None,
-        selection: Optional[Union[str, List[int], List[List[int]]]] = None,
-        dtype: torch.dtype = torch.float32,
-        loss_weight_table: Optional[str] = None,
-        loss_weight_column: Optional[str] = None,
-        loss_weight_default_value: Optional[float] = None,
-        seed: Optional[int] = None,
-    ):
-        """Construct Dataset.
-
-        Args:
-            path: Path to the file(s) from which this `Dataset` should read.
-            pulsemaps: Name(s) of the pulse map series that should be used to
-                construct the nodes on the individual graph objects, and their
-                features. Multiple pulse series maps can be used, e.g., when
-                different DOM types are stored in different maps.
-            features: List of columns in the input files that should be used as
-                node features on the graph objects.
-            truth: List of event-level columns in the input files that should
-                be added as attributes on the graph objects.
-            node_truth: List of node-level columns in the input files that
-                should be added as attributes on the graph objects.
-            index_column: Name of the column in the input files that contains
-                unique indices to identify and map events across tables.
-            truth_table: Name of the table containing event-level truth
-                information.
-            node_truth_table: Name of the table containing node-level truth
-                information.
-            string_selection: Subset of strings for which data should be read
-                and used to construct graph objects. Defaults to None, meaning
-                all strings for which data exists are used.
-            selection: The events that should be read. This can be given either
-                as a list of indices (in `index_column`), or as a string-based
-                selection used to query the `Dataset` for events passing the
-                selection. Defaults to None, meaning that all events in the
-                input files are read.
-            dtype: Type of the feature tensor on the graph objects returned.
-            loss_weight_table: Name of the table containing per-event loss
-                weights.
-            loss_weight_column: Name of the column in `loss_weight_table`
-                containing per-event loss weights. This is also the name of the
-                corresponding attribute assigned to the graph object.
-            loss_weight_default_value: Default per-event loss weight.
-                NOTE: This default value is only applied when
-                `loss_weight_table` and `loss_weight_column` are specified, and
-                in this case to events with no value in the corresponding
-                table/column. That is, if no per-event loss weight table/column
-                is provided, this value is ignored. Defaults to None.
-            seed: Random number generator seed, used for selecting a random
-                subset of events when resolving a string-based selection (e.g.,
-                `"10000 random events ~ event_no % 5 > 0"` or `"20% random
-                events ~ event_no % 5 > 0"`).
-            graph_definition: Method that defines the graph representation.
-        """
-        # Base class constructor
-        super().__init__(name=__name__, class_name=self.__class__.__name__)
-
-        # Check(s)
-        if isinstance(pulsemaps, str):
-            pulsemaps = [pulsemaps]
-
-        assert isinstance(features, (list, tuple))
-        assert isinstance(truth, (list, tuple))
-
-        # Resolve reference to `$GRAPHNET` in path(s)
-        path = self._resolve_graphnet_paths(path)
-
-        # Member variable(s)
-        self._path = path
-        self._selection = None
-        self._pulsemaps = pulsemaps
-        self._features = [index_column] + features
-        self._truth = [index_column] + truth
-        self._index_column = index_column
-        self._truth_table = truth_table
-        self._loss_weight_default_value = loss_weight_default_value
-        self._graph_definition = graph_definition
-
-        if node_truth is not None:
-            assert isinstance(node_truth_table, str)
-            if isinstance(node_truth, str):
-                node_truth = [node_truth]
-
-        self._node_truth = node_truth
-        self._node_truth_table = node_truth_table
-
-        if string_selection is not None:
-            self.warning(
-                (
-                    "String selection detected.\n "
-                    f"Accepted strings: {string_selection}\n "
-                    "All other strings are ignored!"
-                )
-            )
-            if isinstance(string_selection, int):
-                string_selection = [string_selection]
-
-        self._string_selection = string_selection
-
-        self._selection = None
-        if self._string_selection:
-            self._selection = f"string in {str(tuple(self._string_selection))}"
-
-        self._loss_weight_column = loss_weight_column
-        self._loss_weight_table = loss_weight_table
-        if (self._loss_weight_table is None) and (
-            self._loss_weight_column is not None
-        ):
-            self.warning("Error: no loss weight table specified")
-            assert isinstance(self._loss_weight_table, str)
-        if (self._loss_weight_table is not None) and (
-            self._loss_weight_column is None
-        ):
-            self.warning("Error: no loss weight column specified")
-            assert isinstance(self._loss_weight_column, str)
-
-        self._dtype = dtype
-
-        self._label_fns: Dict[str, Callable[[Data], Any]] = {}
-
-        self._string_selection_resolver = StringSelectionResolver(
-            self,
-            index_column=index_column,
-            seed=seed,
-        )
-
-        # Implementation-specific initialisation.
-        self._init()
-
-        # Set unique indices
-        self._indices: Union[List[int], List[List[int]]]
-        if selection is None:
-            self._indices = self._get_all_indices()
-        elif isinstance(selection, str):
-            self._indices = self._resolve_string_selection_to_indices(
-                selection
-            )
-        else:
-            self._indices = selection
-
-        # Purely internal member variables
-        self._missing_variables: Dict[str, List[str]] = {}
-        self._remove_missing_columns()
-
-        # Implementation-specific post-init code.
-        self._post_init()
-
-    # Properties
-    @property
-    def path(self) -> Union[str, List[str]]:
-        """Path to the file(s) from which this `Dataset` reads."""
-        return self._path
-
-    @property
-    def truth_table(self) -> str:
-        """Name of the table containing event-level truth information."""
-        return self._truth_table
-
-    # Abstract method(s)
-    @abstractmethod
-    def _init(self) -> None:
-        """Set internal representation needed to read data from input file."""
-
-    def _post_init(self) -> None:
-        """Implementation-specific code executed after the main constructor."""
-
-    @abstractmethod
-    def _get_all_indices(self) -> List[int]:
-        """Return a list of all unique values in `self._index_column`."""
-
-    @abstractmethod
-    def _get_event_index(
-        self, sequential_index: Optional[int]
-    ) -> Optional[int]:
-        """Return the event index corresponding to a `sequential_index`."""
-
-    @abstractmethod
-    def query_table(
-        self,
-        table: str,
-        columns: Union[List[str], str],
-        sequential_index: Optional[int] = None,
-        selection: Optional[str] = None,
-    ) -> List[Tuple[Any, ...]]:
-        """Query a table at a specific index, optionally with some selection.
-
-        Args:
-            table: Table to be queried.
-            columns: Columns to read out.
-            sequential_index: Sequentially numbered index
-                (i.e. in [0, len(self))) of the event to query. This _may_
-                differ from the indexation used in `self._indices`. If no value
-                is provided, the entire column is returned.
-            selection: Selection to be imposed before reading out data.
-                Defaults to None.
-
-        Returns:
-            List of tuples containing the values in `columns`. If the `table`
-            contains only scalar data for `columns`, a list of length 1 is
-            returned.
-
-        Raises:
-            ColumnMissingException: If one or more elements in `columns` is not
-                present in `table`.
-        """
-
-    # Public method(s)
-    def add_label(
-        self, fn: Callable[[Data], Any], key: Optional[str] = None
-    ) -> None:
-        """Add custom graph label defined using function `fn`."""
-        if isinstance(fn, Label):
-            key = fn.key
-        assert isinstance(
-            key, str
-        ), "Please specify a key for the custom label to be added."
-        assert (
-            key not in self._label_fns
-        ), f"A custom label {key} has already been defined."
-        self._label_fns[key] = fn
-
-    def __len__(self) -> int:
-        """Return number of graphs in `Dataset`."""
-        return len(self._indices)
-
-    def __getitem__(self, sequential_index: int) -> Data:
-        """Return graph `Data` object at `sequential_index`."""
-        if not (0 <= sequential_index < len(self)):
-            raise IndexError(
-                f"Index {sequential_index} not in range [0, {len(self) - 1}]"
-            )
-        features, truth, node_truth, loss_weight = self._query(
-            sequential_index
-        )
-        graph = self._create_graph(features, truth, node_truth, loss_weight)
-        return graph
-
-    # Internal method(s)
-    def _resolve_string_selection_to_indices(
-        self, selection: str
-    ) -> List[int]:
-        """Resolve selection as string to list of indices.
-
-        Selections are expected to have pandas.DataFrame.query-compatible
-        syntax, e.g., `"event_no % 5 > 0"`. Selections may also specify a
-        fixed number of events to randomly sample, e.g.,
-        `"10000 random events ~ event_no % 5 > 0"` or
-        `"20% random events ~ event_no % 5 > 0"`.
-        """
-        return self._string_selection_resolver.resolve(selection)
-
-    def _remove_missing_columns(self) -> None:
-        """Remove columns that are not present in the input file.
-
-        Columns are removed from `self._features` and `self._truth`.
-        """
-        # Check if table is completely empty
-        if len(self) == 0:
-            self.warning("Dataset is empty.")
-            return
-
-        # Find missing features
-        missing_features_set = set(self._features)
-        for pulsemap in self._pulsemaps:
-            missing = self._check_missing_columns(self._features, pulsemap)
-            missing_features_set = missing_features_set.intersection(missing)
-
-        missing_features = list(missing_features_set)
-
-        # Find missing truth variables
-        missing_truth_variables = self._check_missing_columns(
-            self._truth, self._truth_table
-        )
-
-        # Remove missing features
-        if missing_features:
-            self.warning(
-                "Removing the following (missing) features: "
-                + ", ".join(missing_features)
-            )
-            for missing_feature in missing_features:
-                self._features.remove(missing_feature)
-
-        # Remove missing truth variables
-        if missing_truth_variables:
-            self.warning(
-                (
-                    "Removing the following (missing) truth variables: "
-                    + ", ".join(missing_truth_variables)
-                )
-            )
-            for missing_truth_variable in missing_truth_variables:
-                self._truth.remove(missing_truth_variable)
-
-    def _check_missing_columns(
-        self,
-        columns: List[str],
-        table: str,
-    ) -> List[str]:
-        """Return a list of missing columns in `table`."""
-        for column in columns:
-            try:
-                self.query_table(table, [column], 0)
-            except ColumnMissingException:
-                if table not in self._missing_variables:
-                    self._missing_variables[table] = []
-                self._missing_variables[table].append(column)
-            except IndexError:
-                self.warning(f"Dataset contains no entries for {column}")
-
-        return self._missing_variables.get(table, [])
-
-    def _query(
-        self, sequential_index: int
-    ) -> Tuple[
-        List[Tuple[float, ...]],
-        Tuple[Any, ...],
-        Optional[List[Tuple[Any, ...]]],
-        Optional[float],
-    ]:
-        """Query file for event features and truth information.
-
-        The returned lists have lengths corresponding to the number of pulses
-        in the event. Their constituent tuples have lengths corresponding to
-        the number of features/attributes in each output.
-
-        Args:
-            sequential_index: Sequentially numbered index
-                (i.e. in [0, len(self))) of the event to query. This _may_
-                differ from the indexation used in `self._indices`.
-
-        Returns:
-            Tuple containing pulse-level event features; event-level truth
-            information; pulse-level truth information; and event-level
-            loss weights, respectively.
-        """
-        features = []
-        for pulsemap in self._pulsemaps:
-            features_pulsemap = self.query_table(
-                pulsemap, self._features, sequential_index, self._selection
-            )
-            features.extend(features_pulsemap)
-
-        truth: Tuple[Any, ...] = self.query_table(
-            self._truth_table, self._truth, sequential_index
-        )[0]
-        if self._node_truth:
-            assert self._node_truth_table is not None
-            node_truth = self.query_table(
-                self._node_truth_table,
-                self._node_truth,
-                sequential_index,
-                self._selection,
-            )
-        else:
-            node_truth = None
-
-        loss_weight: Optional[float] = None  # Default
-        if self._loss_weight_column is not None:
-            assert self._loss_weight_table is not None
-            loss_weight_list = self.query_table(
-                self._loss_weight_table,
-                self._loss_weight_column,
-                sequential_index,
-            )
-            if len(loss_weight_list):
-                loss_weight = loss_weight_list[0][0]
-            else:
-                loss_weight = -1.0
-
-        return features, truth, node_truth, loss_weight
-
-    def _create_graph(
-        self,
-        features: List[Tuple[float, ...]],
-        truth: Tuple[Any, ...],
-        node_truth: Optional[List[Tuple[Any, ...]]] = None,
-        loss_weight: Optional[float] = None,
-    ) -> Data:
-        """Create Pytorch Data (i.e. graph) object.
-
-        Args:
-            features: List of tuples, containing event features.
-            truth: List of tuples, containing truth information.
-            node_truth: List of tuples, containing node-level truth.
-            loss_weight: A weight associated with the event for weighing the
-                loss.
-
-        Returns:
-            Graph object.
-        """
-        # Convert nested list to simple dict
-        truth_dict = {
-            key: truth[index] for index, key in enumerate(self._truth)
-        }
-
-        # Define custom labels
-        labels_dict = self._get_labels(truth_dict)
-
-        # Convert nested list to simple dict
-        if node_truth is not None:
-            node_truth_array = np.asarray(node_truth)
-            assert self._node_truth is not None
-            node_truth_dict = {
-                key: node_truth_array[:, index]
-                for index, key in enumerate(self._node_truth)
-            }
-
-        # Create list of truth dicts with labels
-        truth_dicts = [labels_dict, truth_dict]
-        if node_truth is not None:
-            truth_dicts.append(node_truth_dict)
-
-        # Catch cases with no reconstructed pulses
-        if len(features):
-            node_features = np.asarray(features)[
-                :, 1:
-            ]  # first entry is index column
-        else:
-            node_features = np.array([]).reshape((0, len(self._features) - 1))
-
-        # Construct graph data object
-        assert self._graph_definition is not None
-        graph = self._graph_definition(
-            node_features=node_features,
-            node_feature_names=self._features[
-                1:
-            ],  # first entry is index column
-            truth_dicts=truth_dicts,
-            custom_label_functions=self._label_fns,
-            loss_weight_column=self._loss_weight_column,
-            loss_weight=loss_weight,
-            loss_weight_default_value=self._loss_weight_default_value,
-            data_path=self._path,
-        )
-        return graph
-
-    def _get_labels(self, truth_dict: Dict[str, Any]) -> Dict[str, Any]:
-        """Return dictionary of labels, to be added as graph attributes."""
-        if "pid" in truth_dict.keys():
-            abs_pid = abs(truth_dict["pid"])
-            sim_type = truth_dict["sim_type"]
-
-            labels_dict = {
-                self._index_column: truth_dict[self._index_column],
-                "muon": int(abs_pid == 13),
-                "muon_stopped": int(truth_dict.get("stopped_muon") == 1),
-                "noise": int((abs_pid == 1) & (sim_type != "data")),
-                "neutrino": int(
-                    (abs_pid != 13) & (abs_pid != 1)
-                ),  # @TODO: `abs_pid in [12, 14, 16]`?
-                "v_e": int(abs_pid == 12),
-                "v_u": int(abs_pid == 14),
-                "v_t": int(abs_pid == 16),
-                "track": int(
-                    (abs_pid == 14) & (truth_dict["interaction_type"] == 1)
-                ),
-                "dbang": self._get_dbang_label(truth_dict),
-                "corsika": int(abs_pid > 20),
-            }
-        else:
-            labels_dict = {
-                self._index_column: truth_dict[self._index_column],
-                "muon": -1,
-                "muon_stopped": -1,
-                "noise": -1,
-                "neutrino": -1,
-                "v_e": -1,
-                "v_u": -1,
-                "v_t": -1,
-                "track": -1,
-                "dbang": -1,
-                "corsika": -1,
-            }
-        return labels_dict
-
-    def _get_dbang_label(self, truth_dict: Dict[str, Any]) -> int:
-        """Get label for double-bang classification."""
-        try:
-            label = int(truth_dict["dbang_decay_length"] > -1)
-            return label
-        except KeyError:
-            return -1
-
-
-class EnsembleDataset(torch.utils.data.ConcatDataset):
-    """Construct a single dataset from a collection of datasets."""
-
-    def __init__(self, datasets: Iterable[Dataset]) -> None:
-        """Construct a single dataset from a collection of datasets.
-
-        Args:
-            datasets: A collection of Datasets
-        """
-        super().__init__(datasets=datasets)
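
Taken together, a typical flow through the API above might look as follows; the config path is hypothetical, and `Track` is assumed to be one of the `Label` implementations in `graphnet.training.labels`:

    from graphnet.data.dataset import Dataset
    from graphnet.training.labels import Track

    dataset = Dataset.from_config("dataset_config.yml")  # hypothetical path
    dataset.add_label(Track())   # custom label, keyed by `Track.key`
    graph = dataset[0]           # `torch_geometric.data.Data` object

    # Several datasets can be combined into a single `EnsembleDataset`:
    combined = Dataset.concatenate([dataset, dataset])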
\ No newline at end of file
diff --git a/_modules/graphnet/data/dataset/parquet/parquet_dataset.html b/_modules/graphnet/data/dataset/parquet/parquet_dataset.html
deleted file mode 100644
index 35d6b34d7..000000000
--- a/_modules/graphnet/data/dataset/parquet/parquet_dataset.html
+++ /dev/null
@@ -1,500 +0,0 @@

Source code for graphnet.data.dataset.parquet.parquet_dataset

-"""`Dataset` class(es) for reading from Parquet files."""
-
-from typing import Any, Dict, List, Optional, Tuple, Union, cast
-
-import numpy as np
-import awkward as ak
-
-from graphnet.data.dataset.dataset import Dataset, ColumnMissingException
-
-
-
-class ParquetDataset(Dataset):
-    """Pytorch dataset for reading from Parquet files."""
-
-    # Implementing abstract method(s)
-    def _init(self) -> None:
-        # Check(s)
-        if not isinstance(self._path, list):
-            assert isinstance(self._path, str)
-            assert self._path.endswith(
-                ".parquet"
-            ), f"Format of input file `{self._path}` is not supported"
-
-        assert (
-            self._node_truth is None
-        ), "Argument `node_truth` is currently not supported."
-        assert (
-            self._node_truth_table is None
-        ), "Argument `node_truth_table` is currently not supported."
-        assert (
-            self._string_selection is None
-        ), "Argument `string_selection` is currently not supported"
-
-        # Set custom member variable(s)
-        if not isinstance(self._path, list):
-            self._parquet_hook = ak.from_parquet(self._path, lazy=False)
-        else:
-            self._parquet_hook = ak.concatenate(
-                ak.from_parquet(file) for file in self._path
-            )
-
-    def _get_all_indices(self) -> List[int]:
-        return np.arange(
-            len(
-                ak.to_numpy(
-                    self._parquet_hook[self._truth_table][self._index_column]
-                ).tolist()
-            )
-        ).tolist()
-
-    def _get_event_index(
-        self, sequential_index: Optional[int]
-    ) -> Optional[int]:
-        index: Optional[int]
-        if sequential_index is None:
-            index = None
-        else:
-            index = cast(List[int], self._indices)[sequential_index]
-
-        return index
-
-    def _format_dictionary_result(
-        self, dictionary: Dict
-    ) -> List[Tuple[Any, ...]]:
-        """Convert the output of `ak.to_list()` into a list of tuples."""
-        # All scalar values
-        if all(map(np.isscalar, dictionary.values())):
-            return [tuple(dictionary.values())]
-
-        # All arrays should have same length
-        array_lengths = [
-            len(values)
-            for values in dictionary.values()
-            if not np.isscalar(values)
-        ]
-        assert len(set(array_lengths)) == 1, (
-            f"Arrays in {dictionary} have differing lengths "
-            f"({set(array_lengths)})."
-        )
-        nb_elements = array_lengths[0]
-
-        # Broadcast scalars
-        for key in dictionary:
-            value = dictionary[key]
-            if np.isscalar(value):
-                dictionary[key] = np.repeat(
-                    value, repeats=nb_elements
-                ).tolist()
-
-        return list(map(tuple, list(zip(*dictionary.values()))))
-
-    def query_table(
-        self,
-        table: str,
-        columns: Union[List[str], str],
-        sequential_index: Optional[int] = None,
-        selection: Optional[str] = None,
-    ) -> List[Tuple[Any, ...]]:
-        """Query table at a specific index, optionally with some selection."""
-        # Check(s)
-        assert (
-            selection is None
-        ), "Argument `selection` is currently not supported"
-
-        index = self._get_event_index(sequential_index)
-
-        try:
-            if index is None:
-                ak_array = self._parquet_hook[table][columns][:]
-            else:
-                ak_array = self._parquet_hook[table][columns][index]
-        except ValueError as e:
-            if "does not exist (not in record)" in str(e):
-                raise ColumnMissingException(str(e))
-            else:
-                raise e
-
-        output = ak_array.to_list()
-
-        result: List[Tuple[Any, ...]] = []
-
-        # Querying single index
-        if isinstance(output, dict):
-            assert list(output.keys()) == columns
-            result = self._format_dictionary_result(output)
-
-        # Querying entire column
-        elif isinstance(output, list):
-            for dictionary in output:
-                assert list(dictionary.keys()) == columns
-                result.extend(self._format_dictionary_result(dictionary))
-
-        return result
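
A short sketch of direct use; the file, pulsemap, and column names are illustrative, and `graph_definition` is assumed to have been constructed elsewhere (in practice instances are usually created via `Dataset.from_config`):

    dataset = ParquetDataset(
        path="events.parquet",
        graph_definition=graph_definition,
        pulsemaps="pulse_table",
        features=["dom_x", "dom_y", "dom_z", "dom_time"],
        truth=["energy", "zenith"],
    )
    # Read two truth columns for the first event:
    rows = dataset.query_table("truth", ["energy", "zenith"], sequential_index=0)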
\ No newline at end of file
diff --git a/_modules/graphnet/data/dataset/sqlite/sqlite_dataset.html b/_modules/graphnet/data/dataset/sqlite/sqlite_dataset.html
deleted file mode 100644
index a6734e1c8..000000000
--- a/_modules/graphnet/data/dataset/sqlite/sqlite_dataset.html
+++ /dev/null
@@ -1,515 +0,0 @@

Source code for graphnet.data.dataset.sqlite.sqlite_dataset

-"""`Dataset` class(es) for reading data from SQLite databases."""
-
-from typing import Any, List, Optional, Tuple, Union
-import pandas as pd
-import sqlite3
-
-from graphnet.data.dataset.dataset import Dataset, ColumnMissingException
-
-
-
-class SQLiteDataset(Dataset):
-    """Pytorch dataset for reading data from SQLite databases."""
-
-    # Implementing abstract method(s)
-    def _init(self) -> None:
-        # Check(s)
-        self._database_list: Optional[List[str]]
-        if isinstance(self._path, list):
-            self._database_list = self._path
-            self._all_connections_established = False
-            self._all_connections: List[sqlite3.Connection] = []
-        else:
-            self._database_list = None
-            assert isinstance(self._path, str)
-            assert self._path.endswith(
-                ".db"
-            ), f"Format of input file `{self._path}` is not supported."
-
-        if self._database_list is not None:
-            self._current_database: Optional[int] = None
-
-        # Set custom member variable(s)
-        self._features_string = ", ".join(self._features)
-        self._truth_string = ", ".join(self._truth)
-        if self._node_truth:
-            self._node_truth_string = ", ".join(self._node_truth)
-
-        self._conn: Optional[sqlite3.Connection] = None
-
-    def _post_init(self) -> None:
-        self._close_connection()
-
-    def query_table(
-        self,
-        table: str,
-        columns: Union[List[str], str],
-        sequential_index: Optional[int] = None,
-        selection: Optional[str] = None,
-    ) -> List[Tuple[Any, ...]]:
-        """Query table at a specific index, optionally with some selection."""
-        # Check(s)
-        if isinstance(columns, list):
-            columns = ", ".join(columns)
-
-        if not selection:  # I.e., `None` or `""`
-            selection = "1=1"  # Identically true, to select all
-
-        index = self._get_event_index(sequential_index)
-
-        # Query table
-        assert index is not None
-        self._establish_connection(index)
-        try:
-            assert self._conn
-            if sequential_index is None:
-                combined_selections = selection
-            else:
-                combined_selections = (
-                    f"{self._index_column} = {index} and {selection}"
-                )
-
-            result = self._conn.execute(
-                f"SELECT {columns} FROM {table} WHERE "
-                f"{combined_selections}"
-            ).fetchall()
-        except sqlite3.OperationalError as e:
-            if "no such column" in str(e):
-                raise ColumnMissingException(str(e))
-            else:
-                raise e
-        return result
-
-    def _get_all_indices(self) -> List[int]:
-        self._establish_connection(0)
-        indices = pd.read_sql_query(
-            f"SELECT {self._index_column} FROM {self._truth_table}", self._conn
-        )
-        self._close_connection()
-        return indices.values.ravel().tolist()
-
-    def _get_event_index(
-        self, sequential_index: Optional[int]
-    ) -> Optional[int]:
-        index: int = 0
-        if sequential_index is not None:
-            index_ = self._indices[sequential_index]
-            if self._database_list is None:
-                assert isinstance(index_, int)
-                index = index_
-            else:
-                assert isinstance(index_, list)
-                index = index_[0]
-        return index
-
-    # Custom, internal method(s)
-    # @TODO: Is it necessary to return anything here?
-    def _establish_connection(self, i: int) -> "SQLiteDataset":
-        """Make sure that a sqlite3 connection is open."""
-        if self._database_list is None:
-            assert isinstance(self._path, str)
-            if self._conn is None:
-                self._conn = sqlite3.connect(self._path)
-        else:
-            indices = self._indices[i]
-            assert isinstance(indices, list)
-            if self._conn is None:
-                if self._all_connections_established is False:
-                    self._all_connections = []
-                    for database in self._database_list:
-                        con = sqlite3.connect(database)
-                        self._all_connections.append(con)
-                    self._all_connections_established = True
-                self._conn = self._all_connections[indices[1]]
-            if indices[1] != self._current_database:
-                self._conn = self._all_connections[indices[1]]
-                self._current_database = indices[1]
-        return self
-
-    # @TODO: Is it necessary to return anything here?
-    def _close_connection(self) -> "SQLiteDataset":
-        """Make sure that no sqlite3 connection is open.
-
-        This should be called before passing the dataset to
-        `torch.DataLoader`, so that the dataset replica on each worker
-        creates its own connection (thereby avoiding
-        `sqlite3.DatabaseError: database disk image is malformed` errors
-        due to the inability to use a sqlite3 connection across processes).
-        """
-        if self._conn is not None:
-            self._conn.close()
-            del self._conn
-            self._conn = None
-        if self._database_list is not None:
-            if self._all_connections_established:
-                for con in self._all_connections:
-                    con.close()
-                del self._all_connections
-                self._all_connections_established = False
-                self._conn = None
-        return self
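
Analogously for the SQLite backend; names are illustrative and `graph_definition` is again assumed to exist:

    dataset = SQLiteDataset(
        path="events.db",
        graph_definition=graph_definition,
        pulsemaps="pulse_table",
        features=["dom_x", "dom_y", "dom_z", "dom_time"],
        truth=["energy", "zenith"],
    )
    rows = dataset.query_table("truth", ["energy"], sequential_index=0)
    # `_post_init()` closes the connection again, so each `DataLoader` worker
    # replica opens its own connection, as explained in `_close_connection`.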
\ No newline at end of file
diff --git a/_modules/graphnet/data/pipeline.html b/_modules/graphnet/data/pipeline.html
deleted file mode 100644
index 90602e63c..000000000
--- a/_modules/graphnet/data/pipeline.html
+++ /dev/null
@@ -1,593 +0,0 @@

Source code for graphnet.data.pipeline

-"""Class(es) used for analysis in PISA."""
-
-from abc import ABC
-import dill
-from functools import reduce
-import os
-from typing import Dict, List, Optional, Tuple
-
-import numpy as np
-import pandas as pd
-from pytorch_lightning import Trainer
-import sqlite3
-import torch
-from torch.utils.data import DataLoader
-
-from graphnet.data.sqlite.sqlite_utilities import create_table_and_save_to_sql
-from graphnet.training.utils import get_predictions, make_dataloader
-from graphnet.models.graphs import GraphDefinition
-
-from graphnet.utilities.logging import Logger
-
-
-
-class InSQLitePipeline(ABC, Logger):
-    """Create a SQLite database for PISA analysis.
-
-    The database will contain truth and GNN predictions and, if available,
-    RETRO reconstructions.
-    """
-
-    def __init__(
-        self,
-        module_dict: Dict,
-        features: List[str],
-        truth: List[str],
-        device: torch.device,
-        retro_table_name: str = "retro",
-        outdir: Optional[str] = None,
-        batch_size: int = 100,
-        n_workers: int = 10,
-        pipeline_name: str = "pipeline",
-    ):
-        """Initialise the pipeline.
-
-        Args:
-            module_dict: A dictionary with GNN modules from GraphNeT. E.g.
-                {'energy': gnn_module_for_energy_regression}
-            features: List of input features for the GNN modules.
-            truth: List of truth for the GNN ModuleList.
-            device: The device used for computation.
-            retro_table_name: Name of the RETRO table.
-            outdir: The directory in which the pipeline database will be
-                stored.
-            batch_size: Batch size for inference.
-            n_workers: Number of workers used in dataloading.
-            pipeline_name: Name of the pipeline. If such a pipeline already
-                exists, an error will be prompted to avoid overwriting.
-        """
-        self._pipeline_name = pipeline_name
-        self._device = device
-        self.n_workers = n_workers
-        self._features = features
-        self._truth = truth
-        self._batch_size = batch_size
-        self._outdir = outdir
-        self._module_dict = module_dict
-        self._retro_table_name = retro_table_name
-
-        # Base class constructor
-        super().__init__(name=__name__, class_name=self.__class__.__name__)
-
-    def __call__(
-        self,
-        database: str,
-        pulsemap: str,
-        graph_definition: GraphDefinition,
-        chunk_size: int = 1000000,
-    ) -> None:
-        """Run inference of each field in self._module_dict[target][''].
-
-        Args:
-            database: Path to database with pulsemap and truth.
-            pulsemap: Name of pulsemaps.
-            graph_definition: GraphDefinition for Dataset.
-            chunk_size: Database will be sliced in chunks of size `chunk_size`.
-                Use this parameter to control memory usage.
-        """
-        outdir = self._get_outdir(database)
-        if isinstance(
-            self._device, str
-        ):  # Because pytorch lightning insists on breaking the pytorch CUDA device naming scheme
-            device = int(self._device[-1])
-        if not os.path.isdir(outdir):
-            dataloaders, event_batches = self._setup_dataloaders(
-                graph_definition=graph_definition,
-                chunk_size=chunk_size,
-                db=database,
-                pulsemap=pulsemap,
-                selection=None,
-                persistent_workers=False,
-            )
-            i = 0
-            for dataloader in dataloaders:
-                self.info("CHUNK %s / %s" % (i, len(dataloaders)))
-                df = self._inference(device, dataloader)
-                truth = self._get_truth(database, event_batches[i].tolist())
-                retro = self._get_retro(database, event_batches[i].tolist())
-                self._append_to_pipeline(outdir, truth, retro, df)
-                i += 1
-        else:
-            self.info(outdir)
-            self.info(
-                "WARNING - Pipeline named %s already exists! \n Please rename pipeline!"
-                % self._pipeline_name
-            )
-
-    def _setup_dataloaders(
-        self,
-        chunk_size: int,
-        db: str,
-        pulsemap: str,
-        graph_definition: GraphDefinition,
-        selection: Optional[List[int]] = None,
-        persistent_workers: bool = False,
-    ) -> Tuple[List[DataLoader], List[np.ndarray]]:
-        if selection is None:
-            selection = self._get_all_event_nos(db)
-        n_chunks = np.ceil(len(selection) / chunk_size)
-        event_batches = np.array_split(selection, n_chunks)
-        dataloaders = []
-        for batch in event_batches:
-            dataloaders.append(
-                make_dataloader(
-                    db=db,
-                    graph_definition=graph_definition,
-                    pulsemaps=pulsemap,
-                    features=self._features,
-                    truth=self._truth,
-                    batch_size=self._batch_size,
-                    shuffle=False,
-                    selection=batch.tolist(),
-                    num_workers=self.n_workers,
-                    persistent_workers=persistent_workers,
-                )
-            )
-        return dataloaders, event_batches
-
-    def _get_all_event_nos(self, db: str) -> List[int]:
-        with sqlite3.connect(db) as con:
-            query = "SELECT event_no FROM truth"
-            selection = pd.read_sql(query, con).values.ravel().tolist()
-        return selection
-
-    def _combine_outputs(self, dataframes: List[pd.DataFrame]) -> pd.DataFrame:
-        return reduce(lambda x, y: pd.merge(x, y, on="event_no"), dataframes)
-
-    def _inference(
-        self, device: torch.device, dataloader: DataLoader
-    ) -> pd.DataFrame:
-        dataframes = []
-        for target in self._module_dict.keys():
-            # dataloader = iter(dataloader)
-            trainer = Trainer(devices=[device], accelerator="gpu")
-            model = torch.load(
-                self._module_dict[target]["path"],
-                map_location="cpu",
-                pickle_module=dill,
-            )
-            model.eval()
-            model.inference()
-            results = get_predictions(
-                trainer,
-                model,
-                dataloader,
-                self._module_dict[target]["output_column_names"],
-                additional_attributes=["event_no"],
-            )
-            dataframes.append(
-                results.sort_values("event_no").reset_index(drop=True)
-            )
-        df = self._combine_outputs(dataframes)
-        return df
-
-    def _get_outdir(self, database: str) -> str:
-        if self._outdir is None:
-            database_name = database.split("/")[-3]
-            outdir = (
-                database.split(database_name)[0]
-                + database_name
-                + "/pipelines/"
-                + self._pipeline_name
-            )
-        else:
-            outdir = self._outdir
-        return outdir
-
-    def _get_truth(self, database: str, selection: List[int]) -> pd.DataFrame:
-        with sqlite3.connect(database) as con:
-            query = "SELECT * FROM truth WHERE event_no in %s" % str(
-                tuple(selection)
-            )
-            truth = pd.read_sql(query, con)
-        return truth
-
-    def _get_retro(self, database: str, selection: List[int]) -> pd.DataFrame:
-        try:
-            with sqlite3.connect(database) as con:
-                query = "SELECT * FROM %s WHERE event_no in %s" % (
-                    self._retro_table_name,
-                    str(tuple(selection)),
-                )
-                retro = pd.read_sql(query, con)
-            return retro
-        except:  # noqa: E722
-            self.info("%s table does not exist" % self._retro_table_name)
-
-    def _append_to_pipeline(
-        self,
-        outdir: str,
-        truth: pd.DataFrame,
-        retro: pd.DataFrame,
-        df: pd.DataFrame,
-    ) -> None:
-        os.makedirs(outdir, exist_ok=True)
-        pipeline_database = outdir + "/%s.db" % self._pipeline_name
-        create_table_and_save_to_sql(df, "reconstruction", pipeline_database)
-        create_table_and_save_to_sql(truth, "truth", pipeline_database)
-        if isinstance(retro, pd.DataFrame):
-            create_table_and_save_to_sql(
-                retro, self._retro_table_name, pipeline_database
-            )
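
A sketch of how this pipeline might be driven; the model path, output column names, feature list, and database are illustrative assumptions:

    pipeline = InSQLitePipeline(
        module_dict={
            "energy": {
                "path": "energy_model.pth",  # model saved with torch.save
                "output_column_names": ["energy_pred"],
            }
        },
        features=["dom_x", "dom_y", "dom_z", "dom_time", "charge"],
        truth=["energy", "zenith"],
        device="cuda:0",
    )
    pipeline(
        database="events.db",
        pulsemap="pulse_table",
        graph_definition=graph_definition,  # assumed constructed elsewhere
    )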
\ No newline at end of file
diff --git a/_modules/graphnet/deployment/i3modules/graphnet_module.html b/_modules/graphnet/deployment/i3modules/graphnet_module.html
deleted file mode 100644
index 994461c04..000000000
--- a/_modules/graphnet/deployment/i3modules/graphnet_module.html
+++ /dev/null
@@ -1,817 +0,0 @@

Source code for graphnet.deployment.i3modules.graphnet_module

-"""Class(es) for deploying GraphNeT models in icetray as I3Modules."""
-from abc import abstractmethod
-from typing import TYPE_CHECKING, Any, List, Union, Dict, Tuple, Optional
-
-import dill
-import numpy as np
-import torch
-from torch_geometric.data import Data, Batch
-
-from graphnet.data.extractors import (
-    I3FeatureExtractor,
-    I3FeatureExtractorIceCubeUpgrade,
-)
-from graphnet.models import Model, StandardModel
-from graphnet.models.graphs import GraphDefinition
-from graphnet.utilities.imports import has_icecube_package
-from graphnet.utilities.config import ModelConfig
-
-if has_icecube_package() or TYPE_CHECKING:
-    from icecube.icetray import (
-        I3Module,
-        I3Frame,
-    )  # pyright: reportMissingImports=false
-    from icecube.dataclasses import (
-        I3Double,
-        I3MapKeyVectorDouble,
-    )  # pyright: reportMissingImports=false
-    from icecube import dataclasses, dataio, icetray
-
-
-
-class GraphNeTI3Module:
-    """Base I3 Module for GraphNeT.
-
-    Contains methods for extracting pulsemaps, producing graphs and writing to
-    frames.
-    """
-
-    def __init__(
-        self,
-        graph_definition: GraphDefinition,
-        pulsemap: str,
-        features: List[str],
-        pulsemap_extractor: Union[
-            List[I3FeatureExtractor], I3FeatureExtractor
-        ],
-        gcd_file: str,
-    ):
-        """I3Module constructor.
-
-        Arguments:
-            graph_definition: An instance of GraphDefinition. E.g. KNNGraph.
-            pulsemap: The pulse map on which the module functions.
-            features: The features that are used from the pulse map.
-                E.g. [dom_x, dom_y, dom_z, charge]
-            pulsemap_extractor: The I3FeatureExtractor used to extract the
-                pulsemap from the I3Frames.
-            gcd_file: Path to the associated gcd-file.
-        """
-        assert isinstance(graph_definition, GraphDefinition)
-        self._graph_definition = graph_definition
-        self._pulsemap = pulsemap
-        self._features = features
-        assert isinstance(gcd_file, str), "gcd_file must be string"
-        self._gcd_file = gcd_file
-        if isinstance(pulsemap_extractor, list):
-            self._i3_extractors = pulsemap_extractor
-        else:
-            self._i3_extractors = [pulsemap_extractor]
-
-        for i3_extractor in self._i3_extractors:
-            i3_extractor.set_files(i3_file="", gcd_file=self._gcd_file)
-
-    @abstractmethod
-    def __call__(self, frame: I3Frame) -> bool:
-        """Define here how the module acts on the frame.
-
-        Must return True if successful.
-
-        Return True # SUPER IMPORTANT
-        """
-
-    def _make_graph(
-        self, frame: I3Frame
-    ) -> Data:  # pylint: disable=invalid-name
-        """Process Physics I3Frame into graph."""
-        # Extract features
-        node_features = self._extract_feature_array_from_frame(frame)
-        # Prepare graph data
-        if len(node_features) > 0:
-            data = self._graph_definition(
-                node_features=node_features,
-                node_feature_names=self._features,
-            )
-            return Batch.from_data_list([data])
-        else:
-            return None
-
-    def _extract_feature_array_from_frame(self, frame: I3Frame) -> np.array:
-        """Apply the I3FeatureExtractors to the I3Frame.
-
-        Arguments:
-            frame: Physics I3Frame (PFrame)
-
-        Returns:
-            Array with pulsemap.
-        """
-        features = None
-        for i3extractor in self._i3_extractors:
-            feature_dict = i3extractor(frame)
-            features_pulsemap = np.array(
-                [feature_dict[key] for key in self._features]
-            ).T
-            if features is None:
-                features = features_pulsemap
-            else:
-                features = np.concatenate(
-                    (features, features_pulsemap), axis=0
-                )
-        return features
-
-    def _add_to_frame(self, frame: I3Frame, data: Dict[str, Any]) -> I3Frame:
-        """Add every field in data to I3Frame.
-
-        Arguments:
-            frame: I3Frame (physics)
-            data: Dictionary containing content that will be written to frame.
-
-        Returns:
-            frame: Same I3Frame as input, but with the new entries.
-        """
-        assert isinstance(
-            data, dict
-        ), f"data must be of type dict. Got {type(data)}"
-        for key in data.keys():
-            if key not in frame:
-                frame.Put(key, data[key])
-        return frame
-
-
-class I3InferenceModule(GraphNeTI3Module):
-    """General class for inference on i3 frames."""
-
-    def __init__(
-        self,
-        pulsemap: str,
-        features: List[str],
-        pulsemap_extractor: Union[
-            List[I3FeatureExtractor], I3FeatureExtractor
-        ],
-        model_config: Union[ModelConfig, str],
-        state_dict: str,
-        model_name: str,
-        gcd_file: str,
-        prediction_columns: Optional[Union[List[str], str]] = None,
-    ):
-        """General class for inference on I3Frames (physics).
-
-        Arguments:
-            pulsemap: The pulsemap that the model is expecting as input.
-            features: The features of the pulsemap that the model is expecting.
-            pulsemap_extractor: The extractor used to extract the pulsemap.
-            model_config: The ModelConfig (or path to it) that summarizes the
-                model used for inference.
-            state_dict: Path to state_dict containing the learned weights.
-            model_name: The name used for the model. Will help define the
-                named entry in the I3Frame. E.g. "dynedge".
-            gcd_file: Path to associated gcd file.
-            prediction_columns: Column names for the predictions of the model.
-                Will help define the named entry in the I3Frame.
-                E.g. ['energy_reco']. Optional.
-        """
-        # Construct model & load weights
-        self.model = Model.from_config(model_config, trust=True)
-        self.model.load_state_dict(state_dict)
-
-        super().__init__(
-            pulsemap=pulsemap,
-            features=features,
-            pulsemap_extractor=pulsemap_extractor,
-            gcd_file=gcd_file,
-            graph_definition=self.model._graph_definition,
-        )
-        self.model.inference()
-
-        self.model.to("cpu")
-        if prediction_columns is not None:
-            if isinstance(prediction_columns, str):
-                self.prediction_columns = [prediction_columns]
-            else:
-                self.prediction_columns = prediction_columns
-        else:
-            self.prediction_columns = self.model.prediction_labels
-
-        self.model_name = model_name
-
-    def __call__(self, frame: I3Frame) -> bool:
-        """Write predictions from model to frame."""
-        # Inference
-        graph = self._make_graph(frame)
-        if graph is not None:
-            predictions = self._inference(graph)
-        else:
-            predictions = np.repeat(
-                [np.nan], len(self.prediction_columns)
-            ).reshape(-1, len(self.prediction_columns))
-        # Check dimensions of predictions and prediction columns
-        if len(predictions.shape) > 1:
-            dim = predictions.shape[1]
-        else:
-            dim = len(predictions)
-        assert dim == len(
-            self.prediction_columns
-        ), f"""predictions have shape {dim} but \n
-        prediction columns have [{self.prediction_columns}]"""
-
-        # Build dictionary of predictions
-        data = {}
-        assert predictions.shape[0] == 1
-        for i in range(dim if isinstance(dim, int) else len(dim)):
-            try:
-                assert len(predictions[:, i]) == 1
-                data[
-                    self.model_name + "_" + self.prediction_columns[i]
-                ] = I3Double(float(predictions[:, i][0]))
-            except IndexError:
-                data[
-                    self.model_name + "_" + self.prediction_columns[i]
-                ] = I3Double(predictions[0])
-
-        # Submission methods
-        frame = self._add_to_frame(frame=frame, data=data)
-        return True
-
-    def _inference(self, data: Data) -> np.ndarray:
-        # Perform inference
-        task_predictions = self.model(data)
-        assert (
-            len(task_predictions) == 1
-        ), f"""This method assumes a single task. \n
-        Got {len(task_predictions)} tasks."""
-        return self.model(data)[0].detach().numpy()
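
For orientation, wiring the module into an icetray tray might look roughly like this; the paths, pulsemap, and extractor choice are illustrative assumptions, and an IceCube software environment is required:

    from I3Tray import I3Tray
    from graphnet.data.extractors import I3FeatureExtractorIceCube86

    module = I3InferenceModule(
        pulsemap="SplitInIcePulses",
        features=["dom_x", "dom_y", "dom_z", "dom_time", "charge"],
        pulsemap_extractor=I3FeatureExtractorIceCube86("SplitInIcePulses"),
        model_config="model_config.yml",  # hypothetical paths
        state_dict="state_dict.pth",
        model_name="dynedge",
        gcd_file="GeoCalibDetectorStatus.i3.gz",
    )

    tray = I3Tray()
    tray.Add("I3Reader", FilenameList=["physics.i3.zst"])
    tray.Add(module, "graphnet_inference")  # callable acting on each P-frame
    tray.Execute()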
class I3PulseCleanerModule(I3InferenceModule):
    """A specialized module for pulse cleaning.

    It is assumed that the model provided has been trained for this.
    """

    def __init__(
        self,
        pulsemap: str,
        features: List[str],
        pulsemap_extractor: Union[
            List[I3FeatureExtractor], I3FeatureExtractor
        ],
        model_config: str,
        state_dict: str,
        model_name: str,
        *,
        gcd_file: str,
        threshold: float = 0.7,
        discard_empty_events: bool = False,
        prediction_columns: Optional[Union[List[str], str]] = None,
    ):
        """General class for inference on I3Frames (physics).

        Arguments:
            pulsemap: the pulsemap that the model is expecting as input
                (the one that is being cleaned).
            features: the features of the pulsemap that the model is expecting.
            pulsemap_extractor: The extractor used to extract the pulsemap.
            model_config: The ModelConfig (or path to it) that summarizes the
                model used for inference.
            state_dict: Path to state_dict containing the learned weights.
            model_name: The name used for the model. Will help define the named
                entry in the I3Frame. E.g. "dynedge".
            gcd_file: path to associated gcd file.
            threshold: the threshold for being considered a positive case.
                E.g., predictions >= threshold will be considered
                to be signal, all else noise.
            discard_empty_events: When true, this flag will eliminate events
                whose cleaned pulse series are empty. Can be used
                to speed up processing especially for noise
                simulation, since it will not do any writing or
                further calculations.
            prediction_columns: column names for the predictions of the model.
                Will help define the named entry in the I3Frame.
                E.g. ['energy_reco']. Optional.
        """
        super().__init__(
            pulsemap=pulsemap,
            features=features,
            pulsemap_extractor=pulsemap_extractor,
            model_config=model_config,
            state_dict=state_dict,
            model_name=model_name,
            prediction_columns=prediction_columns,
            gcd_file=gcd_file,
        )
        self._threshold = threshold
        self._predictions_key = f"{pulsemap}_{model_name}_Predictions"
        self._total_pulsemap_name = f"{pulsemap}_{model_name}_Pulses"
        self._discard_empty_events = discard_empty_events

    def __call__(self, frame: I3Frame) -> bool:
        """Add a cleaned pulsemap to frame."""
        # Inference
        gcd_file = self._gcd_file
        graph = self._make_graph(frame)
        if graph is None:  # If there are no pulses to clean
            return False
        predictions = self._inference(graph)
        if self._discard_empty_events:
            if sum(predictions > self._threshold) == 0:
                return False

        if len(predictions.shape) == 1:
            predictions = predictions.reshape(-1, 1)

        assert predictions.shape[1] == 1

        # Build dictionary of predictions
        data = {}

        predictions_map = self._construct_prediction_map(
            frame=frame, predictions=predictions
        )

        # Adds the raw predictions to dictionary
        if self._predictions_key not in frame.keys():
            data[self._predictions_key] = predictions_map

        # Create a pulse map mask, indicating the pulses that are over
        # threshold (e.g. identified as signal) and therefore should be kept.
        # Using a lambda function to evaluate which pulses to keep by
        # checking the prediction for each pulse.
        # (Adds the actual pulsemap to dictionary.)
        if self._total_pulsemap_name not in frame.keys():
            data[
                self._total_pulsemap_name
            ] = dataclasses.I3RecoPulseSeriesMapMask(
                frame,
                self._pulsemap,
                lambda om_key, index, pulse: predictions_map[om_key][index]
                >= self._threshold,
            )

        # Submit predictions and general pulsemap
        frame = self._add_to_frame(frame=frame, data=data)
        data = {}
        # Adds an additional pulsemap for each DOM type
        if isinstance(
            self._i3_extractors[0], I3FeatureExtractorIceCubeUpgrade
        ):
            mDOMMap, DEggMap, IceCubeMap = self._split_pulsemap_in_dom_types(
                frame=frame, gcd_file=gcd_file
            )

            if f"{self._total_pulsemap_name}_mDOMs_Only" not in frame.keys():
                data[
                    f"{self._total_pulsemap_name}_mDOMs_Only"
                ] = dataclasses.I3RecoPulseSeriesMap(mDOMMap)

            if f"{self._total_pulsemap_name}_dEggs_Only" not in frame.keys():
                data[
                    f"{self._total_pulsemap_name}_dEggs_Only"
                ] = dataclasses.I3RecoPulseSeriesMap(DEggMap)

            if f"{self._total_pulsemap_name}_pDOMs_Only" not in frame.keys():
                data[
                    f"{self._total_pulsemap_name}_pDOMs_Only"
                ] = dataclasses.I3RecoPulseSeriesMap(IceCubeMap)

        # Submits the additional pulsemaps to the frame
        frame = self._add_to_frame(frame=frame, data=data)

        return True

    def _split_pulsemap_in_dom_types(
        self, frame: I3Frame, gcd_file: Any
    ) -> Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]:
        """Split the cleaned pulsemap into multiple pulsemaps.

        Arguments:
            frame: I3Frame (physics)
            gcd_file: path to associated gcd file

        Returns:
            mDOMMap, DEggMap, IceCubeMap
        """
        g = dataio.I3File(gcd_file)
        gFrame = g.pop_frame()
        while "I3Geometry" not in gFrame.keys():
            gFrame = g.pop_frame()
        omGeoMap = gFrame["I3Geometry"].omgeo

        mDOMMap, DEggMap, IceCubeMap = {}, {}, {}
        pulses = dataclasses.I3RecoPulseSeriesMap.from_frame(
            frame, self._total_pulsemap_name
        )
        for P in pulses:
            om = omGeoMap[P[0]]
            if om.omtype == 130:  # "mDOM"
                mDOMMap[P[0]] = P[1]
            elif om.omtype == 120:  # "DEgg"
                DEggMap[P[0]] = P[1]
            elif om.omtype == 20:  # "IceCube / pDOM"
                IceCubeMap[P[0]] = P[1]
        return mDOMMap, DEggMap, IceCubeMap

    def _construct_prediction_map(
        self, frame: I3Frame, predictions: np.ndarray
    ) -> I3MapKeyVectorDouble:
        """Make a pulsemap from predictions (for all OM types).

        Arguments:
            frame: I3Frame (physics)
            predictions: predictions from GNN

        Returns:
            predictions_map: a pulsemap from predictions
        """
        pulsemap = dataclasses.I3RecoPulseSeriesMap.from_frame(
            frame, self._pulsemap
        )

        idx = 0
        predictions = predictions.squeeze(1)
        predictions_map = dataclasses.I3MapKeyVectorDouble()
        for om_key, pulses in pulsemap.items():
            num_pulses = len(pulses)
            predictions_map[om_key] = predictions[
                idx : idx + num_pulses
            ].tolist()
            idx += num_pulses

        # Checks
        assert idx == len(predictions), (
            "Not all predictions were mapped to pulses; "
            "validation of predictions has failed."
        )

        assert pulsemap.keys() == predictions_map.keys(), (
            "Input pulse map and predictions map do "
            "not contain exactly the same OMs."
        )
        return predictions_map
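# Example (hypothetical sketch, not part of the original module): construction
# mirrors `I3InferenceModule` above; names and paths are placeholders.
#
# cleaner = I3PulseCleanerModule(
#     pulsemap="SplitInIcePulses",
#     features=["dom_x", "dom_y", "dom_z", "dom_time", "charge"],
#     pulsemap_extractor=I3FeatureExtractorIceCubeUpgrade("SplitInIcePulses"),
#     model_config="/path/to/cleaning_model_config.yml",
#     state_dict="/path/to/cleaning_state_dict.pth",
#     model_name="dynedge_clean",
#     gcd_file="/path/to/gcd.i3.gz",
#     threshold=0.7,
#     discard_empty_events=True,
# )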
\ No newline at end of file
diff --git a/_modules/graphnet/models/coarsening.html b/_modules/graphnet/models/coarsening.html
deleted file mode 100644
index c9a941617..000000000
--- a/_modules/graphnet/models/coarsening.html
+++ /dev/null
@@ -1,708 +0,0 @@
Source code for graphnet.models.coarsening

-"""Class(es) for coarsening operations (i.e., clustering, or local pooling)."""
-
-from abc import abstractmethod
-from typing import List, Optional, Union
-from copy import deepcopy
-import torch
-from torch import LongTensor, Tensor
-from torch_geometric.data import Data, Batch
-from sklearn.cluster import DBSCAN
-
-# from torch_geometric.utils import unbatch_edge_index
-from graphnet.models.components.pool import (
-    group_by,
-    avg_pool,
-    max_pool,
-    min_pool,
-    sum_pool,
-    avg_pool_x,
-    max_pool_x,
-    min_pool_x,
-    sum_pool_x,
-    std_pool_x,
-)
-from graphnet.models import Model
-
-# Utility method(s)
-from torch_geometric.utils import degree
-
-# NOTE: From [https://github.com/pyg-team/pytorch_geometric/pull/4903]
-# TODO:  Remove once bumping to torch_geometric>=2.1.0
-#       See [https://github.com/pyg-team/pytorch_geometric/blob/master/CHANGELOG.md]
-
-
-
def unbatch_edge_index(edge_index: Tensor, batch: Tensor) -> List[Tensor]:
    # noqa: D401
    r"""Splits the :obj:`edge_index` according to a :obj:`batch` vector.

    Args:
        edge_index (Tensor): The edge_index tensor. Must be ordered.
        batch (LongTensor): The batch vector
            :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
            node to a specific example. Must be ordered.

    :rtype: :class:`List[Tensor]`
    """
    deg = degree(batch, dtype=torch.int64)
    ptr = torch.cat([deg.new_zeros(1), deg.cumsum(dim=0)[:-1]], dim=0)

    edge_batch = batch[edge_index[0]]
    edge_index = edge_index - ptr[edge_batch]
    sizes = degree(edge_batch, dtype=torch.int64).cpu().tolist()
    return edge_index.split(sizes, dim=1)
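# Example (illustrative sketch, not part of the original module): splitting a
# batched `edge_index` back into per-graph tensors, for two graphs with 3 and
# 2 nodes, respectively.
if __name__ == "__main__":
    _edge_index = torch.tensor([[0, 1, 3], [1, 2, 4]])  # nodes 3-4 = graph 1
    _batch = torch.tensor([0, 0, 0, 1, 1])
    print(unbatch_edge_index(_edge_index, _batch))
    # -> [tensor([[0, 1], [1, 2]]), tensor([[0], [1]])]  (re-indexed per graph)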
-[docs] -class Coarsening(Model): - """Base class for coarsening operations.""" - - # Class variables - reduce_options = { - "avg": (avg_pool, avg_pool_x), - "min": (min_pool, min_pool_x), - "max": (max_pool, max_pool_x), - "sum": (sum_pool, sum_pool_x), - } - - def __init__( - self, - reduce: str = "avg", - transfer_attributes: bool = True, - ): - """Construct `Coarsening`.""" - assert reduce in self.reduce_options - - ( - self._reduce_method, - self._attribute_reduce_method, - ) = self.reduce_options[reduce] - self._do_transfer_attributes = transfer_attributes - - # Base class constructor - super().__init__() - - @abstractmethod - def _perform_clustering(self, data: Union[Data, Batch]) -> LongTensor: - """Cluster nodes in `data` by assigning a cluster index to each.""" - - def _additional_features(self, cluster: LongTensor, data: Batch) -> Tensor: - """Perform additional poolings of feature tensor `x` on `data`. - - By default the nominal `pooling_method` is used for features as well. - This method can be overwritten for bespoke coarsening operations. - """ - - def _transfer_attributes( - self, cluster: LongTensor, original_data: Batch, pooled_data: Batch - ) -> Batch: - """Transfer attributes on `original_data` to `pooled_data`.""" - # Check(s) - if not self._do_transfer_attributes: - return pooled_data - - attributes = list(original_data._store.keys()) - batch: Optional[LongTensor] = original_data.batch - for ix, attr in enumerate(attributes): - if attr not in pooled_data._store: - values: Tensor = getattr(original_data, attr) - - attr_is_node_level_tensor = False - if isinstance(values, Tensor): - if batch is None: - attr_is_node_level_tensor = ( - values.dim() > 1 or values.size(dim=0) > 1 - ) - else: - attr_is_node_level_tensor = ( - values.size() == original_data.batch.size() - ) - - if attr_is_node_level_tensor: - values = self._attribute_reduce_method( - cluster, - values, - batch=torch.zeros_like(values, dtype=torch.int32), - )[0] - - setattr(pooled_data, attr, values) - - return pooled_data - -
    def forward(self, data: Union[Data, Batch]) -> Union[Data, Batch]:
        """Perform coarsening operation."""
        # Get tensor of cluster indices for each node.
        cluster: LongTensor = self._perform_clustering(data)

        # Check whether a graph has already been built. Otherwise, set a dummy
        # connectivity, as this is required by pooling functions.
        edge_index = data.edge_index
        if edge_index is None:
            data.edge_index = torch.tensor([[]], dtype=torch.int64)

        # Pool `data` object, including `x`, `batch`, and `edge_index`.
        pooled_data: Batch = self._reduce_method(cluster, data)

        # Optionally overwrite feature tensor
        x = self._additional_features(cluster, data)
        if x is not None:
            pooled_data.x = torch.cat(
                (
                    pooled_data.x,
                    x,
                ),
                dim=1,
            )

        # Reset `edge_index` if necessary.
        if edge_index is None:
            data.edge_index = edge_index
            pooled_data.edge_index = edge_index

        # Transfer attributes on `data`, pooling as required.
        pooled_data = self._transfer_attributes(cluster, data, pooled_data)

        # Reconstruct batch attributes
        if isinstance(data, Batch):  # if a Batch object
            pooled_data = self._reconstruct_batch(data, pooled_data)
        return pooled_data
- - - def _reconstruct_batch(self, original: Data, pooled: Data) -> Data: - pooled = self._add_slice_dict(original, pooled) - pooled = self._add_inc_dict(original, pooled) - return pooled - - def _add_slice_dict(self, original: Data, pooled: Data) -> Data: - # Copy original slice_dict and count nodes in each graph in pooled batch - slice_dict = deepcopy(original._slice_dict) - _, counts = torch.unique_consecutive(pooled.batch, return_counts=True) - # Reconstruct the entry in slice_dict for pulsemaps - only these are affected by pooling - pulsemap_slice = [0] - for i in range(len(counts)): - pulsemap_slice.append(pulsemap_slice[i] + counts[i].item()) - - # Identifies pulsemap entries in slice_dict and set them to pulsemap_slice - for field in slice_dict.keys(): - if (original._num_graphs) == slice_dict[field][-1]: - pass # not pulsemap, so skip - else: - slice_dict[field] = pulsemap_slice - pooled._slice_dict = slice_dict - return pooled - - def _add_inc_dict(self, original: Data, pooled: Data) -> Data: - # not changed by coarsening - pooled._inc_dict = deepcopy(original._inc_dict) - return pooled
class AttributeCoarsening(Coarsening):
    """Coarsen pulses based on specified attributes."""

    def __init__(
        self,
        attributes: List[str],
        reduce: str = "avg",
        transfer_attributes: bool = True,
    ):
        """Construct `AttributeCoarsening`."""
        self._attributes = attributes

        # Base class constructor
        super().__init__(reduce, transfer_attributes)

    def _perform_clustering(self, data: Union[Data, Batch]) -> LongTensor:
        """Cluster nodes in `data` by assigning a cluster index to each."""
        dom_index = group_by(data, self._attributes)
        return dom_index
-[docs] -class DOMCoarsening(Coarsening): - """Coarsen pulses to DOM-level.""" - - def __init__( - self, - reduce: str = "avg", - transfer_attributes: bool = True, - keys: Optional[List[str]] = None, - ): - """Cluster pulses on the same DOM.""" - super().__init__(reduce, transfer_attributes) - if keys is None: - self._keys = [ - "dom_x", - "dom_y", - "dom_z", - "rde", - "pmt_area", - ] - else: - self._keys = keys - - def _perform_clustering(self, data: Union[Data, Batch]) -> LongTensor: - """Cluster nodes in `data` by assigning a cluster index to each.""" - dom_index = group_by(data, self._keys) - return dom_index
- - - -
-[docs] -class CustomDOMCoarsening(DOMCoarsening): - """Coarsen pulses to DOM-level with additional attributes.""" - - def _additional_features(self, cluster: LongTensor, data: Data) -> Tensor: - """Perform Additional poolings of feature tensor `x` on `data`.""" - batch = data.batch - - features = data.features - if batch is not None: - features = [feats[0] for feats in features] - - ix_time = features.index("dom_time") - ix_charge = features.index("charge") - - time = data.x[:, ix_time] - charge = data.x[:, ix_charge] - - x = torch.stack( - ( - min_pool_x(cluster, time, batch)[0], - max_pool_x(cluster, time, batch)[0], - std_pool_x(cluster, time, batch)[0], - min_pool_x(cluster, charge, batch)[0], - max_pool_x(cluster, charge, batch)[0], - std_pool_x(cluster, charge, batch)[0], - sum_pool_x(cluster, torch.ones_like(charge), batch)[ - 0 - ], # Num. nodes (pulses) per cluster (DOM) - ), - dim=1, - ) - - return x
class DOMAndTimeWindowCoarsening(Coarsening):
    """Coarsen pulses to DOM-level, with additional time-window clustering."""

    def __init__(
        self,
        time_window: float,
        reduce: str = "avg",
        transfer_attributes: bool = True,
        keys: Optional[List[str]] = None,
        time_key: str = "dom_time",
    ):
        """Cluster pulses on the same DOM within `time_window`."""
        super().__init__(reduce, transfer_attributes)
        self._time_window = time_window
        self._cluster_method = DBSCAN(self._time_window, min_samples=1)
        # Avoid a mutable default argument; the original default list is
        # retained here.
        self._keys = keys or [
            "dom_x",
            "dom_y",
            "dom_z",
            "rde",
            "pmt_area",
        ]
        self._time_key = time_key

    def _perform_clustering(self, data: Union[Data, Batch]) -> LongTensor:
        """Cluster nodes in `data` by assigning a cluster index to each."""
        dom_index = group_by(data, self._keys)
        if data.batch is not None:
            features = data.features[0]
        else:
            features = data.features

        ix_time = features.index(self._time_key)
        hit_times = data.x[:, ix_time]

        # Scale up dom_index to make sure clusters are well separated
        times_and_domids = torch.stack(
            [
                hit_times,
                dom_index * self._time_window * 10,
            ]
        ).T
        clusters = torch.tensor(
            self._cluster_method.fit_predict(times_and_domids.cpu()),
            device=hit_times.device,
        )

        return clusters
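# Example (hypothetical sketch, not part of the original module): pooling
# pulses to DOM level, assuming the input graphs carry the default attributes
# (`dom_x`, ..., `pmt_area`) and a `features` list, as produced by the
# graphnet data pipeline.
#
# coarsening = DOMCoarsening(reduce="avg", transfer_attributes=True)
# pooled_batch = coarsening(batch_of_graphs)  # one node per DOM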
\ No newline at end of file
diff --git a/_modules/graphnet/models/components/layers.html b/_modules/graphnet/models/components/layers.html
deleted file mode 100644
index 555ac857b..000000000
--- a/_modules/graphnet/models/components/layers.html
+++ /dev/null
@@ -1,579 +0,0 @@
Source code for graphnet.models.components.layers

-"""Class(es) implementing layers to be used in `graphnet` models."""
-
-from typing import Any, Callable, Optional, Sequence, Union, List, Tuple
-
-import torch
-from torch.functional import Tensor
-from torch_geometric.nn import EdgeConv
-from torch_geometric.nn.pool import knn_graph
-from torch_geometric.typing import Adj, PairTensor
-from torch_geometric.nn.conv import MessagePassing
-from torch_geometric.nn.inits import reset
-from torch.nn.modules import TransformerEncoder, TransformerEncoderLayer
-from torch.nn.modules.normalization import LayerNorm
-from torch_geometric.utils import to_dense_batch
-from pytorch_lightning import LightningModule
-
-
-
-[docs] -class DynEdgeConv(EdgeConv, LightningModule): - """Dynamical edge convolution layer.""" - - def __init__( - self, - nn: Callable, - aggr: str = "max", - nb_neighbors: int = 8, - features_subset: Optional[Union[Sequence[int], slice]] = None, - **kwargs: Any, - ): - """Construct `DynEdgeConv`. - - Args: - nn: The MLP/torch.Module to be used within the `EdgeConv`. - aggr: Aggregation method to be used with `EdgeConv`. - nb_neighbors: Number of neighbours to be clustered after the - `EdgeConv` operation. - features_subset: Subset of features in `Data.x` that should be used - when dynamically performing the new graph clustering after the - `EdgeConv` operation. Defaults to all features. - **kwargs: Additional features to be passed to `EdgeConv`. - """ - # Check(s) - if features_subset is None: - features_subset = slice(None) # Use all features - assert isinstance(features_subset, (list, slice)) - - # Base class constructor - super().__init__(nn=nn, aggr=aggr, **kwargs) - - # Additional member variables - self.nb_neighbors = nb_neighbors - self.features_subset = features_subset - -
    def forward(
        self, x: Tensor, edge_index: Adj, batch: Optional[Tensor] = None
    ) -> Tuple[Tensor, Adj]:
        """Forward pass; returns updated features and recomputed edges."""
        # Standard EdgeConv forward pass
        x = super().forward(x, edge_index)

        # Recompute adjacency
        edge_index = knn_graph(
            x=x[:, self.features_subset],
            k=self.nb_neighbors,
            batch=batch,
        ).to(self.device)

        return x, edge_index
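# Example (illustrative sketch, not part of the original module): one
# `DynEdgeConv` step on ten random 4-feature nodes in a single graph. The MLP
# input width is 2 x nb_features, since `EdgeConv` operates on
# [x_i, x_j - x_i].
if __name__ == "__main__":
    _mlp = torch.nn.Sequential(torch.nn.Linear(2 * 4, 16), torch.nn.ReLU())
    _conv = DynEdgeConv(_mlp, aggr="max", nb_neighbors=3)
    _x = torch.randn(10, 4)
    _edge_index = knn_graph(_x, k=3)
    _x, _edge_index = _conv(_x, _edge_index)
    print(_x.shape)  # torch.Size([10, 16])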
class EdgeConvTito(MessagePassing, LightningModule):
    """Implementation of the `EdgeConvTito` layer.

    Used in the TITO solution to the 'IceCube - Neutrinos in Deep Ice'
    Kaggle competition.
    """

    def __init__(
        self,
        nn: Callable,
        aggr: str = "max",
        **kwargs: Any,
    ):
        """Construct `EdgeConvTito`.

        Args:
            nn: The MLP/torch.Module to be used within the `EdgeConvTito`.
            aggr: Aggregation method to be used with `EdgeConvTito`.
            **kwargs: Additional features to be passed to `EdgeConvTito`.
        """
        super().__init__(aggr=aggr, **kwargs)
        self.nn = nn
        self.reset_parameters()
-[docs] - def reset_parameters(self) -> None: - """Reset all learnable parameters of the module.""" - reset(self.nn)
- - -
-[docs] - def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor: - """Forward pass.""" - if isinstance(x, Tensor): - x = (x, x) - # propagate_type: (x: PairTensor) - return self.propagate(edge_index, x=x, size=None)
- - -
    def message(self, x_i: Tensor, x_j: Tensor) -> Tensor:
        """`EdgeConvTito` message passing."""
        return self.nn(
            torch.cat([x_i, x_j - x_i, x_j], dim=-1)
        )  # EdgeConvTito
- - - def __repr__(self) -> str: - """Print out module name.""" - return f"{self.__class__.__name__}(nn={self.nn})"
- - - -
class DynTrans(EdgeConvTito, LightningModule):
    """Implementation of the `dynTrans1` layer.

    Used in the TITO solution to the 'IceCube - Neutrinos in Deep Ice'
    Kaggle competition.
    """

    def __init__(
        self,
        layer_sizes: Optional[List[int]] = None,
        aggr: str = "max",
        features_subset: Optional[Union[Sequence[int], slice]] = None,
        n_head: int = 8,
        **kwargs: Any,
    ):
        """Construct `DynTrans`.

        Args:
            layer_sizes: List of layer sizes to be used in `DynTrans`.
            aggr: Aggregation method to be used with `DynTrans`.
            features_subset: Subset of features in `Data.x` that should be used
                when dynamically performing the new graph clustering after the
                `EdgeConv` operation. Defaults to all features.
            n_head: Number of heads to be used in the multi-head attention
                layers.
            **kwargs: Additional features to be passed to `DynTrans`.
        """
        # Check(s)
        if features_subset is None:
            features_subset = slice(None)  # Use all features
        assert isinstance(features_subset, (list, slice))

        if layer_sizes is None:
            layer_sizes = [256, 256, 256]
        layers = []
        for ix, (nb_in, nb_out) in enumerate(
            zip(layer_sizes[:-1], layer_sizes[1:])
        ):
            if ix == 0:
                nb_in *= 3  # EdgeConvTito concatenates [x_i, x_j - x_i, x_j]
            layers.append(torch.nn.Linear(nb_in, nb_out))
            layers.append(torch.nn.LeakyReLU())
        d_model = nb_out

        # Base class constructor
        super().__init__(nn=torch.nn.Sequential(*layers), aggr=aggr, **kwargs)

        # Additional member variables
        self.features_subset = features_subset

        self.norm1 = LayerNorm(d_model, eps=1e-5)  # lNorm

        # Transformer layer(s)
        encoder_layer = TransformerEncoderLayer(
            d_model=d_model,
            nhead=n_head,
            batch_first=True,
            norm_first=False,
        )
        self._transformer_encoder = TransformerEncoder(
            encoder_layer, num_layers=1
        )
-[docs] - def forward( - self, x: Tensor, edge_index: Adj, batch: Optional[Tensor] = None - ) -> Tensor: - """Forward pass.""" - x_out = super().forward(x, edge_index) - - if x_out.shape[-1] == x.shape[-1]: - x = x + x_out - else: - x = x_out - - x = self.norm1(x) # lNorm - - # Transformer layer - x, mask = to_dense_batch(x, batch) - x = self._transformer_encoder(x, src_key_padding_mask=~mask) - x = x[mask] - - return x
\ No newline at end of file
diff --git a/_modules/graphnet/models/components/pool.html b/_modules/graphnet/models/components/pool.html
deleted file mode 100644
index 2523dfb69..000000000
--- a/_modules/graphnet/models/components/pool.html
+++ /dev/null
@@ -1,656 +0,0 @@
Source code for graphnet.models.components.pool

-"""Functions for performing pooling/clustering/coarsening."""
-
-from typing import Any, Callable, List, Optional, Union
-
-import torch
-from torch import LongTensor, Tensor
-from torch_geometric.data import Data, Batch
-from torch_geometric.nn.pool.consecutive import consecutive_cluster
-from torch_geometric.nn.pool.pool import pool_edge, pool_batch, pool_pos
-from torch_scatter import scatter, scatter_std
-
-from torch_geometric.nn.pool import (
-    avg_pool,
-    max_pool,
-    avg_pool_x,
-    max_pool_x,
-)
-
-
-
def min_pool(
    cluster: LongTensor, data: Data, transform: Optional[Any] = None
) -> Data:
    """Perform min-pooling of `Data`.

    Like `max_pool`, just negating `data.x`.
    """
    data.x = -data.x
    data_pooled = max_pool(
        cluster,
        data,
        transform,
    )
    data.x = -data.x
    data_pooled.x = -data_pooled.x
    return data_pooled
def min_pool_x(
    cluster: LongTensor,
    x: Tensor,
    batch: LongTensor,
    size: Optional[int] = None,
) -> Tensor:
    """Perform min-pooling of `Tensor`.

    Like `max_pool_x`, just negating `x`.
    """
    ret = max_pool_x(cluster, -x, batch, size)
    if size is None:
        return (-ret[0], ret[1])
    else:
        return -ret
-[docs] -def sum_pool_and_distribute( - tensor: Tensor, - cluster_index: LongTensor, - batch: Optional[LongTensor] = None, -) -> Tensor: - """Sum-pool values and distribute result to the individual nodes.""" - if batch is None: - batch = torch.zeros(tensor.size(dim=0)).long() - tensor_pooled, _ = sum_pool_x(cluster_index, tensor, batch) - inv, _ = consecutive_cluster(cluster_index) - tensor_unpooled = tensor_pooled[inv] - return tensor_unpooled
- - - -def _group_identical( - tensor: Tensor, batch: Optional[LongTensor] = None -) -> LongTensor: - """Group rows in `tensor` that are identical. - - Args: - tensor: Tensor of shape [N, F]. - batch: Batch indices, to only group identical rows within batches. - - Returns: - List of group indices, from 0 to num. groups - 1, assigning all - identical rows to the same group. - """ - if batch is not None: - tensor = torch.cat((batch.unsqueeze(dim=1), tensor), dim=1) - return torch.unique(tensor, return_inverse=True, sorted=False, dim=0)[1] - - -
def group_by(data: Union[Data, Batch], keys: List[str]) -> LongTensor:
    """Group nodes in `data` that have identical values of `keys`.

    This grouping is done within each event in the case of batching. This
    allows for, e.g., assigning the same index to all pulses on the same PMT
    or DOM in the same event. This can be used for coarsening graphs, e.g.,
    from pulse-level to DOM-level by aggregating features across each group
    returned by this method.

    Example:
        Given:
            data.f1 = [1,1,2,2,2]
            data.f2 = [6,7,7,7,8]
        Calls:
            groupby(data, ['f1'])       -> [0, 0, 1, 1, 1]
            groupby(data, ['f2'])       -> [0, 1, 1, 1, 2]
            groupby(data, ['f1', 'f2']) -> [0, 1, 2, 2, 3]
    """
    features = [getattr(data, key) for key in keys]
    tensor = torch.stack(features).T  # .int() @TODO: Required? Use rounding?
    batch = getattr(data, "batch", None)
    index = _group_identical(tensor, batch)
    return index
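# Example (illustrative sketch, not part of the original module): the
# docstring example above as runnable code.
if __name__ == "__main__":
    _data = Data()
    _data.f1 = torch.tensor([1, 1, 2, 2, 2])
    _data.f2 = torch.tensor([6, 7, 7, 7, 8])
    print(group_by(_data, ["f1", "f2"]))  # tensor([0, 1, 2, 2, 3])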
-[docs] -def group_pulses_to_dom(data: Data) -> Data: - """Group pulses on the same DOM, using DOM and string number.""" - data.dom_index = group_by(data, ["dom_number", "string"]) - return data
- - - -
-[docs] -def group_pulses_to_pmt(data: Data) -> Data: - """Group pulses on the same PMT, using PMT, DOM, and string number.""" - data.pmt_index = group_by(data, ["pmt_number", "dom_number", "string"]) - return data
- - - -# Below mirroring `torch_geometric.nn.pool.{avg,max}_pool.py`. -def _sum_pool_x( - cluster: LongTensor, x: Tensor, size: Optional[int] = None -) -> Tensor: - return scatter(x, cluster, dim=0, dim_size=size, reduce="sum") - - -def _std_pool_x( - cluster: LongTensor, x: Tensor, size: Optional[int] = None -) -> Tensor: - return scatter_std(x, cluster, dim=0, dim_size=size, unbiased=False) - - -
def sum_pool_x(
    cluster: LongTensor,
    x: Tensor,
    batch: LongTensor,
    size: Optional[int] = None,
) -> Tuple[Tensor, Optional[LongTensor]]:
    r"""Sum-pool node features according to the clustering defined in `cluster`.

    Args:
        cluster: Cluster vector :math:`\mathbf{c} \in \{ 0,
            \ldots, N - 1 \}^N`, which assigns each node to a specific cluster.
        x: Node feature matrix
            :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}`.
        batch: Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots,
            B-1\}}^N`, which assigns each node to a specific example.
        size: The maximum number of clusters in a single
            example. This property is useful to obtain a batch-wise dense
            representation, *e.g.* for applying FC layers, but should only be
            used if the size of the maximum number of clusters per example is
            known in advance.
    """
    if size is not None:
        batch_size = int(batch.max().item()) + 1
        return _sum_pool_x(cluster, x, batch_size * size), None

    cluster, perm = consecutive_cluster(cluster)
    x = _sum_pool_x(cluster, x)
    batch = pool_batch(perm, batch)

    return x, batch
def std_pool_x(
    cluster: LongTensor,
    x: Tensor,
    batch: LongTensor,
    size: Optional[int] = None,
) -> Tuple[Tensor, Optional[LongTensor]]:
    r"""Std-pool node features according to the clustering defined in `cluster`.

    Args:
        cluster: Cluster vector :math:`\mathbf{c} \in \{ 0,
            \ldots, N - 1 \}^N`, which assigns each node to a specific cluster.
        x: Node feature matrix
            :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}`.
        batch: Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots,
            B-1\}}^N`, which assigns each node to a specific example.
        size: The maximum number of clusters in a single
            example. This property is useful to obtain a batch-wise dense
            representation, *e.g.* for applying FC layers, but should only be
            used if the size of the maximum number of clusters per example is
            known in advance.
    """
    if size is not None:
        batch_size = int(batch.max().item()) + 1
        return _std_pool_x(cluster, x, batch_size * size), None

    cluster, perm = consecutive_cluster(cluster)
    x = _std_pool_x(cluster, x)
    batch = pool_batch(perm, batch)

    return x, batch
-[docs] -def sum_pool( - cluster: LongTensor, data: Data, transform: Optional[Callable] = None -) -> Data: - r"""Pool and coarsen graph according to the clustering defined in `cluster`. - - All nodes within the same cluster will be represented as one node. - Final node features are defined by the *sum* of features of all nodes - within the same cluster, node positions are averaged and edge indices are - defined to be the union of the edge indices of all nodes within the same - cluster. - - Args: - cluster: Cluster vector :math:`\mathbf{c} \in \{ 0, - \ldots, N - 1 \}^N`, which assigns each node to a specific cluster. - data: Graph data object. - transform: A function/transform that takes in the - coarsened and pooled :obj:`torch_geometric.data.Data` object and - returns a transformed version. - """ - cluster, perm = consecutive_cluster(cluster) - - x = None if data.x is None else _sum_pool_x(cluster, data.x) - index, attr = pool_edge(cluster, data.edge_index, data.edge_attr) - batch = None if data.batch is None else pool_batch(perm, data.batch) - pos = None if data.pos is None else pool_pos(cluster, data.pos) - - data = Batch(batch=batch, x=x, edge_index=index, edge_attr=attr, pos=pos) - - if transform is not None: - data = transform(data) - - return data
- - - -
-[docs] -def std_pool( - cluster: LongTensor, data: Data, transform: Optional[Callable] = None -) -> Data: - r"""Pool and coarsen graph according to the clustering defined in `cluster`. - - All nodes within the same cluster will be represented as one node. - Final node features are defined by the *std* of features of all nodes - within the same cluster, node positions are averaged and edge indices are - defined to be the union of the edge indices of all nodes within the same - cluster. - - Args: - cluster: Cluster vector :math:`\mathbf{c} \in \{ 0, - \ldots, N - 1 \}^N`, which assigns each node to a specific cluster. - data: Graph data object. - transform: A function/transform that takes in the - coarsened and pooled :obj:`torch_geometric.data.Data` object and - returns a transformed version. - """ - cluster, perm = consecutive_cluster(cluster) - - x = None if data.x is None else _std_pool_x(cluster, data.x) - index, attr = pool_edge(cluster, data.edge_index, data.edge_attr) - batch = None if data.batch is None else pool_batch(perm, data.batch) - pos = None if data.pos is None else pool_pos(cluster, data.pos) - - data = Batch(batch=batch, x=x, edge_index=index, edge_attr=attr, pos=pos) - - if transform is not None: - data = transform(data) - - return data
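# Example (illustrative sketch, not part of the original module): sum-pooling
# a three-node graph where the first two nodes form one cluster.
if __name__ == "__main__":
    _data = Data(
        x=torch.tensor([[1.0], [2.0], [4.0]]),
        edge_index=torch.tensor([[0, 1], [1, 2]]),
    )
    _cluster = torch.tensor([0, 0, 1])
    print(sum_pool(_cluster, _data).x)  # tensor([[3.], [4.]])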
\ No newline at end of file
diff --git a/_modules/graphnet/models/detector/detector.html b/_modules/graphnet/models/detector/detector.html
deleted file mode 100644
index 814e04815..000000000
--- a/_modules/graphnet/models/detector/detector.html
+++ /dev/null
@@ -1,419 +0,0 @@
Source code for graphnet.models.detector.detector

-"""Base detector-specific `Model` class(es)."""
-
-from abc import abstractmethod
-from typing import Dict, Callable, List
-
-from torch_geometric.data import Data
-import torch
-
-from graphnet.models import Model
-from graphnet.utilities.decorators import final
-
-
-
-[docs] -class Detector(Model): - """Base class for all detector-specific read-ins in graphnet.""" - - def __init__(self) -> None: - """Construct `Detector`.""" - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - -
-[docs] - @abstractmethod - def feature_map(self) -> Dict[str, Callable]: - """List of features used/assumed by inheriting `Detector` objects."""
- - -
-[docs] - @final - def forward( # type: ignore - self, node_features: torch.tensor, node_feature_names: List[str] - ) -> Data: - """Pre-process graph `Data` features and build graph adjacency.""" - return self._standardize(node_features, node_feature_names)
- - - @final - def _standardize( - self, node_features: torch.tensor, node_feature_names: List[str] - ) -> Data: - for idx, feature in enumerate(node_feature_names): - try: - node_features[:, idx] = self.feature_map()[feature]( # type: ignore - node_features[:, idx] - ) - except KeyError as e: - self.warning( - f"""No Standardization function found for '{feature}'""" - ) - raise e - return node_features - - def _identity(self, x: torch.tensor) -> torch.tensor: - """Apply no standardization to input.""" - return x
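# Example (illustrative sketch, not part of the original module): a minimal
# custom read-in. The feature name and scaling below are hypothetical.
if __name__ == "__main__":

    class ToyDetector(Detector):
        """Toy read-in with a single standardised feature."""

        def feature_map(self) -> Dict[str, Callable]:
            return {"sensor_x": lambda x: x / 100.0}

    _x = torch.tensor([[250.0], [500.0]])
    print(ToyDetector()(_x, ["sensor_x"]))  # tensor([[2.5000], [5.0000]])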
\ No newline at end of file
diff --git a/_modules/graphnet/models/detector/icecube.html b/_modules/graphnet/models/detector/icecube.html
deleted file mode 100644
index 76d9a8e16..000000000
--- a/_modules/graphnet/models/detector/icecube.html
+++ /dev/null
@@ -1,528 +0,0 @@
Source code for graphnet.models.detector.icecube

-"""IceCube-specific `Detector` class(es)."""
-
-from typing import Dict, Callable
-import torch
-
-from graphnet.models.detector.detector import Detector
-
-
-
-[docs] -class IceCube86(Detector): - """`Detector` class for IceCube-86.""" - -
-[docs] - def feature_map(self) -> Dict[str, Callable]: - """Map standardization functions to each dimension of input data.""" - feature_map = { - "dom_x": self._dom_xyz, - "dom_y": self._dom_xyz, - "dom_z": self._dom_xyz, - "dom_time": self._dom_time, - "charge": self._charge, - "rde": self._rde, - "pmt_area": self._pmt_area, - } - return feature_map
- - - def _dom_xyz(self, x: torch.tensor) -> torch.tensor: - return x / 500.0 - - def _dom_time(self, x: torch.tensor) -> torch.tensor: - return (x - 1.0e04) / 3.0e4 - - def _charge(self, x: torch.tensor) -> torch.tensor: - return torch.log10(x) - - def _rde(self, x: torch.tensor) -> torch.tensor: - return (x - 1.25) / 0.25 - - def _pmt_area(self, x: torch.tensor) -> torch.tensor: - return x / 0.05
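# Example (illustrative sketch, not part of the original module):
# standardising a single IceCube-86 pulse; the column order must match the
# feature names passed in.
if __name__ == "__main__":
    _names = [
        "dom_x", "dom_y", "dom_z", "dom_time", "charge", "rde", "pmt_area"
    ]
    _x = torch.tensor([[100.0, -50.0, 250.0, 10500.0, 1.0, 1.25, 0.05]])
    print(IceCube86()(_x, _names))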
- - - -
-[docs] -class IceCubeKaggle(Detector): - """`Detector` class for Kaggle Competition.""" - -
-[docs] - def feature_map(self) -> Dict[str, Callable]: - """Map standardization functions to each dimension of input data.""" - feature_map = { - "x": self._xyz, - "y": self._xyz, - "z": self._xyz, - "time": self._time, - "charge": self._charge, - "auxiliary": self._identity, - } - return feature_map
- - - def _xyz(self, x: torch.tensor) -> torch.tensor: - return x / 500.0 - - def _time(self, x: torch.tensor) -> torch.tensor: - return (x - 1.0e04) / 3.0e4 - - def _charge(self, x: torch.tensor) -> torch.tensor: - return torch.log10(x) / 3.0
- - - -
-[docs] -class IceCubeDeepCore(Detector): - """`Detector` class for IceCube-DeepCore.""" - -
-[docs] - def feature_map(self) -> Dict[str, Callable]: - """Map standardization functions to each dimension of input data.""" - feature_map = { - "dom_x": self._dom_xy, - "dom_y": self._dom_xy, - "dom_z": self._dom_z, - "dom_time": self._dom_time, - "charge": self._identity, - "rde": self._rde, - "pmt_area": self._pmt_area, - } - return feature_map
- - - def _dom_xy(self, x: torch.tensor) -> torch.tensor: - return x / 100.0 - - def _dom_z(self, x: torch.tensor) -> torch.tensor: - return (x + 350.0) / 100.0 - - def _dom_time(self, x: torch.tensor) -> torch.tensor: - return ((x / 1.05e04) - 1.0) * 20.0 - - def _rde(self, x: torch.tensor) -> torch.tensor: - return (x - 1.25) / 0.25 - - def _pmt_area(self, x: torch.tensor) -> torch.tensor: - return x / 0.05
- - - -
-[docs] -class IceCubeUpgrade(Detector): - """`Detector` class for IceCube-Upgrade.""" - -
-[docs] - def feature_map(self) -> Dict[str, Callable]: - """Map standardization functions to each dimension of input data.""" - feature_map = { - "dom_x": self._dom_xyz, - "dom_y": self._dom_xyz, - "dom_z": self._dom_xyz, - "dom_time": self._dom_time, - "charge": self._charge, - "rde": self._identity, - "pmt_area": self._pmt_area, - "string": self._string, - "pmt_number": self._pmt_number, - "dom_number": self._dom_number, - "pmt_dir_x": self._identity, - "pmt_dir_y": self._identity, - "pmt_dir_z": self._identity, - "dom_type": self._dom_type, - } - - return feature_map
- - - def _dom_time(self, x: torch.tensor) -> torch.tensor: - return (x / 2e04) - 1.0 - - def _charge(self, x: torch.tensor) -> torch.tensor: - return torch.log10(x) / 2.0 - - def _string(self, x: torch.tensor) -> torch.tensor: - return (x - 50.0) / 50.0 - - def _pmt_number(self, x: torch.tensor) -> torch.tensor: - return x / 20.0 - - def _dom_number(self, x: torch.tensor) -> torch.tensor: - return (x - 60.0) / 60.0 - - def _dom_type(self, x: torch.tensor) -> torch.tensor: - return x / 130.0 - - def _dom_xyz(self, x: torch.tensor) -> torch.tensor: - return x / 500.0 - - def _pmt_area(self, x: torch.tensor) -> torch.tensor: - return x / 0.05
\ No newline at end of file
diff --git a/_modules/graphnet/models/detector/prometheus.html b/_modules/graphnet/models/detector/prometheus.html
deleted file mode 100644
index 7f234af38..000000000
--- a/_modules/graphnet/models/detector/prometheus.html
+++ /dev/null
@@ -1,395 +0,0 @@
Source code for graphnet.models.detector.prometheus

-"""Prometheus-specific `Detector` class(es)."""
-
-from typing import Dict, Callable
-import torch
-
-from graphnet.models.detector.detector import Detector
-
-
-
-[docs] -class Prometheus(Detector): - """`Detector` class for Prometheus prototype.""" - -
-[docs] - def feature_map(self) -> Dict[str, Callable]: - """Map standardization functions to each dimension.""" - feature_map = { - "sensor_pos_x": self._sensor_pos_xy, - "sensor_pos_y": self._sensor_pos_xy, - "sensor_pos_z": self._sensor_pos_z, - "t": self._t, - } - return feature_map
- - - def _sensor_pos_xy(self, x: torch.tensor) -> torch.tensor: - return x / 100 - - def _sensor_pos_z(self, x: torch.tensor) -> torch.tensor: - return (x + 350) / 100 - - def _t(self, x: torch.tensor) -> torch.tensor: - return ((x / 1.05e04) - 1.0) * 20.0
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/convnet.html b/_modules/graphnet/models/gnn/convnet.html
deleted file mode 100644
index 61066449f..000000000
--- a/_modules/graphnet/models/gnn/convnet.html
+++ /dev/null
@@ -1,484 +0,0 @@
Source code for graphnet.models.gnn.convnet

-"""Implementation of the ConvNet GNN model architecture.
-
-Author: Martin Ha Minh
-"""
-
-import torch
-from torch import Tensor
-from torch.nn import BatchNorm1d, Linear, Dropout
-import torch.nn.functional as F
-from torch_geometric.nn import TAGConv, global_add_pool, global_max_pool
-from torch_geometric.data import Data
-
-from graphnet.models.gnn.gnn import GNN
-
-
-
-[docs] -class ConvNet(GNN): - """ConvNet (convolutional network) model.""" - - def __init__( - self, - nb_inputs: int, - nb_outputs: int, - nb_intermediate: int = 128, - dropout_ratio: float = 0.3, - ): - """Construct `ConvNet`. - - Args: - nb_inputs: Number of input features, i.e. dimension of input - layer. - nb_outputs: Number of prediction labels, i.e. dimension of - output layer. - nb_intermediate: Number of nodes in intermediate layer(s). - dropout_ratio: Fraction of nodes to drop. - """ - # Base class constructor - super().__init__(nb_inputs, nb_outputs) - - # Member variables - self.nb_intermediate = nb_intermediate - self.nb_intermediate2 = 6 * self.nb_intermediate - - # Architecture configuration - self.conv1 = TAGConv(self.nb_inputs, self.nb_intermediate, 2) - self.conv2 = TAGConv(self.nb_intermediate, self.nb_intermediate, 2) - self.conv3 = TAGConv(self.nb_intermediate, self.nb_intermediate, 2) - - self.batchnorm1 = BatchNorm1d(self.nb_intermediate2) - - self.linear1 = Linear(self.nb_intermediate2, self.nb_intermediate2) - self.linear2 = Linear(self.nb_intermediate2, self.nb_intermediate2) - self.linear3 = Linear(self.nb_intermediate2, self.nb_intermediate2) - self.linear4 = Linear(self.nb_intermediate2, self.nb_intermediate2) - self.linear5 = Linear(self.nb_intermediate2, self.nb_intermediate2) - - self.drop1 = Dropout(dropout_ratio) - self.drop2 = Dropout(dropout_ratio) - self.drop3 = Dropout(dropout_ratio) - self.drop4 = Dropout(dropout_ratio) - self.drop5 = Dropout(dropout_ratio) - - self.out = Linear(self.nb_intermediate2, self.nb_outputs) - -
-[docs] - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass.""" - # Convenience variables - x, edge_index, batch = data.x, data.edge_index, data.batch - - # Graph convolutional operations - x = F.leaky_relu(self.conv1(x, edge_index)) - x1 = torch.cat( - [ - global_add_pool(x, batch), - global_max_pool(x, batch), - ], - dim=1, - ) - - x = F.leaky_relu(self.conv2(x, edge_index)) - x2 = torch.cat( - [ - global_add_pool(x, batch), - global_max_pool(x, batch), - ], - dim=1, - ) - - x = F.leaky_relu(self.conv3(x, edge_index)) - x3 = torch.cat( - [ - global_add_pool(x, batch), - global_max_pool(x, batch), - ], - dim=1, - ) - - # Skip-cat - x = torch.cat([x1, x2, x3], dim=1) - - # Batch-normalising intermediate features - x = self.batchnorm1(x) - - # Post-processing - x = F.leaky_relu(self.linear1(x)) - x = self.drop1(x) - x = F.leaky_relu(self.linear2(x)) - x = self.drop2(x) - x = F.leaky_relu(self.linear3(x)) - x = self.drop3(x) - x = F.leaky_relu(self.linear4(x)) - x = self.drop4(x) - x = F.leaky_relu(self.linear5(x)) - x = self.drop5(x) - - # Read-out - x = self.out(x) - - return x
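# Example (illustrative sketch, not part of the original module): a toy
# forward pass through `ConvNet` for a single graph with 10 nodes and 7 input
# features.
if __name__ == "__main__":
    _data = Data(
        x=torch.randn(10, 7),
        edge_index=torch.randint(0, 10, (2, 20)),
        batch=torch.zeros(10, dtype=torch.long),
    )
    _model = ConvNet(nb_inputs=7, nb_outputs=3)
    print(_model(_data).shape)  # torch.Size([1, 3])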
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/dynedge.html b/_modules/graphnet/models/gnn/dynedge.html
deleted file mode 100644
index 1bb966c10..000000000
--- a/_modules/graphnet/models/gnn/dynedge.html
+++ /dev/null
@@ -1,691 +0,0 @@
Source code for graphnet.models.gnn.dynedge

-"""Implementation of the DynEdge GNN model architecture."""
-from typing import List, Optional, Sequence, Tuple, Union
-
-import torch
-from torch import Tensor, LongTensor
-from torch_geometric.data import Data
-from torch_scatter import scatter_max, scatter_mean, scatter_min, scatter_sum
-
-from graphnet.models.components.layers import DynEdgeConv
-from graphnet.models.gnn.gnn import GNN
-from graphnet.models.utils import calculate_xyzt_homophily
-
-GLOBAL_POOLINGS = {
-    "min": scatter_min,
-    "max": scatter_max,
-    "sum": scatter_sum,
-    "mean": scatter_mean,
-}
-
-
-
class DynEdge(GNN):
    """DynEdge (dynamical edge convolutional) model."""

    def __init__(
        self,
        nb_inputs: int,
        *,
        nb_neighbours: int = 8,
        features_subset: Optional[Union[List[int], slice]] = None,
        dynedge_layer_sizes: Optional[List[Tuple[int, ...]]] = None,
        post_processing_layer_sizes: Optional[List[int]] = None,
        readout_layer_sizes: Optional[List[int]] = None,
        global_pooling_schemes: Optional[Union[str, List[str]]] = None,
        add_global_variables_after_pooling: bool = False,
    ):
        """Construct `DynEdge`.

        Args:
            nb_inputs: Number of input features on each node.
            nb_neighbours: Number of neighbours to use in the k-nearest
                neighbour clustering which is performed after each (dynamical)
                edge convolution.
            features_subset: The subset of latent features on each node that
                are used as metric dimensions when performing the k-nearest
                neighbours clustering. Defaults to [0,1,2].
            dynedge_layer_sizes: The layer sizes, or latent feature dimensions,
                used in the `DynEdgeConv` layer. Each entry in
                `dynedge_layer_sizes` corresponds to a single `DynEdgeConv`
                layer; the integers in the corresponding tuple correspond to
                the layer sizes in the multi-layer perceptron (MLP) that is
                applied within each `DynEdgeConv` layer. That is, a list of
                size-two tuples means that all `DynEdgeConv` layers contain a
                two-layer MLP.
                Defaults to [(128, 256), (336, 256), (336, 256), (336, 256)].
            post_processing_layer_sizes: Hidden layer sizes in the MLP
                following the skip-concatenation of the outputs of each
                `DynEdgeConv` layer. Defaults to [336, 256].
            readout_layer_sizes: Hidden layer sizes in the MLP following the
                post-processing _and_ optional global pooling. As this is the
                last layer(s) in the model, the last layer in the read-out
                yields the output of the `DynEdge` model. Defaults to [128,].
            global_pooling_schemes: The list of global pooling schemes to use.
                Options are: "min", "max", "mean", and "sum".
            add_global_variables_after_pooling: Whether to add global variables
                after global pooling. The alternative is to add (distribute)
                them to the individual nodes before any convolutional
                operations.
        """
        # Latent feature subset for computing nearest neighbours in DynEdge.
        if features_subset is None:
            features_subset = slice(0, 3)

        # DynEdge layer sizes
        if dynedge_layer_sizes is None:
            dynedge_layer_sizes = [
                (128, 256),
                (336, 256),
                (336, 256),
                (336, 256),
            ]

        assert isinstance(dynedge_layer_sizes, list)
        assert len(dynedge_layer_sizes)
        assert all(isinstance(sizes, tuple) for sizes in dynedge_layer_sizes)
        assert all(len(sizes) > 0 for sizes in dynedge_layer_sizes)
        assert all(
            all(size > 0 for size in sizes) for sizes in dynedge_layer_sizes
        )

        self._dynedge_layer_sizes = dynedge_layer_sizes

        # Post-processing layer sizes
        if post_processing_layer_sizes is None:
            post_processing_layer_sizes = [
                336,
                256,
            ]

        assert isinstance(post_processing_layer_sizes, list)
        assert len(post_processing_layer_sizes)
        assert all(size > 0 for size in post_processing_layer_sizes)

        self._post_processing_layer_sizes = post_processing_layer_sizes

        # Read-out layer sizes
        if readout_layer_sizes is None:
            readout_layer_sizes = [
                128,
            ]

        assert isinstance(readout_layer_sizes, list)
        assert len(readout_layer_sizes)
        assert all(size > 0 for size in readout_layer_sizes)

        self._readout_layer_sizes = readout_layer_sizes

        # Global pooling scheme(s)
        if isinstance(global_pooling_schemes, str):
            global_pooling_schemes = [global_pooling_schemes]

        if isinstance(global_pooling_schemes, list):
            for pooling_scheme in global_pooling_schemes:
                assert (
                    pooling_scheme in GLOBAL_POOLINGS
                ), f"Global pooling scheme {pooling_scheme} not supported."
        else:
            assert global_pooling_schemes is None

        self._global_pooling_schemes = global_pooling_schemes

        if add_global_variables_after_pooling:
            assert self._global_pooling_schemes, (
                "No global pooling schemes were requested, so cannot add"
                " global variables after pooling."
- ) - self._add_global_variables_after_pooling = ( - add_global_variables_after_pooling - ) - - # Base class constructor - super().__init__(nb_inputs, self._readout_layer_sizes[-1]) - - # Remaining member variables() - self._activation = torch.nn.LeakyReLU() - self._nb_inputs = nb_inputs - self._nb_global_variables = 5 + nb_inputs - self._nb_neighbours = nb_neighbours - self._features_subset = features_subset - - self._construct_layers() - - def _construct_layers(self) -> None: - """Construct layers (torch.nn.Modules).""" - # Convolutional operations - nb_input_features = self._nb_inputs - if not self._add_global_variables_after_pooling: - nb_input_features += self._nb_global_variables - - self._conv_layers = torch.nn.ModuleList() - nb_latent_features = nb_input_features - for sizes in self._dynedge_layer_sizes: - layers = [] - layer_sizes = [nb_latent_features] + list(sizes) - for ix, (nb_in, nb_out) in enumerate( - zip(layer_sizes[:-1], layer_sizes[1:]) - ): - if ix == 0: - nb_in *= 2 - layers.append(torch.nn.Linear(nb_in, nb_out)) - layers.append(self._activation) - - conv_layer = DynEdgeConv( - torch.nn.Sequential(*layers), - aggr="add", - nb_neighbors=self._nb_neighbours, - features_subset=self._features_subset, - ) - self._conv_layers.append(conv_layer) - - nb_latent_features = nb_out - - # Post-processing operations - nb_latent_features = ( - sum(sizes[-1] for sizes in self._dynedge_layer_sizes) - + nb_input_features - ) - - post_processing_layers = [] - layer_sizes = [nb_latent_features] + list( - self._post_processing_layer_sizes - ) - for nb_in, nb_out in zip(layer_sizes[:-1], layer_sizes[1:]): - post_processing_layers.append(torch.nn.Linear(nb_in, nb_out)) - post_processing_layers.append(self._activation) - - self._post_processing = torch.nn.Sequential(*post_processing_layers) - - # Read-out operations - nb_poolings = ( - len(self._global_pooling_schemes) - if self._global_pooling_schemes - else 1 - ) - nb_latent_features = nb_out * nb_poolings - if self._add_global_variables_after_pooling: - nb_latent_features += self._nb_global_variables - - readout_layers = [] - layer_sizes = [nb_latent_features] + list(self._readout_layer_sizes) - for nb_in, nb_out in zip(layer_sizes[:-1], layer_sizes[1:]): - readout_layers.append(torch.nn.Linear(nb_in, nb_out)) - readout_layers.append(self._activation) - - self._readout = torch.nn.Sequential(*readout_layers) - - def _global_pooling(self, x: Tensor, batch: LongTensor) -> Tensor: - """Perform global pooling.""" - assert self._global_pooling_schemes - pooled = [] - for pooling_scheme in self._global_pooling_schemes: - pooling_fn = GLOBAL_POOLINGS[pooling_scheme] - pooled_x = pooling_fn(x, index=batch, dim=0) - if isinstance(pooled_x, tuple) and len(pooled_x) == 2: - # `scatter_{min,max}`, which return also an argument, vs. - # `scatter_{mean,sum}` - pooled_x, _ = pooled_x - pooled.append(pooled_x) - - return torch.cat(pooled, dim=1) - - def _calculate_global_variables( - self, - x: Tensor, - edge_index: LongTensor, - batch: LongTensor, - *additional_attributes: Tensor, - ) -> Tensor: - """Calculate global variables.""" - # Calculate homophily (scalar variables) - h_x, h_y, h_z, h_t = calculate_xyzt_homophily(x, edge_index, batch) - - # Calculate mean features - global_means = scatter_mean(x, batch, dim=0) - - # Add global variables - global_variables = torch.cat( - [ - global_means, - h_x, - h_y, - h_z, - h_t, - ] - + [attr.unsqueeze(dim=1) for attr in additional_attributes], - dim=1, - ) - - return global_variables - -
-[docs] - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass.""" - # Convenience variables - x, edge_index, batch = data.x, data.edge_index, data.batch - - global_variables = self._calculate_global_variables( - x, - edge_index, - batch, - torch.log10(data.n_pulses), - ) - - # Distribute global variables out to each node - if not self._add_global_variables_after_pooling: - distribute = ( - batch.unsqueeze(dim=1) == torch.unique(batch).unsqueeze(dim=0) - ).type(torch.float) - - global_variables_distributed = torch.sum( - distribute.unsqueeze(dim=2) - * global_variables.unsqueeze(dim=0), - dim=1, - ) - - x = torch.cat((x, global_variables_distributed), dim=1) - - # DynEdge-convolutions - skip_connections = [x] - for conv_layer in self._conv_layers: - x, edge_index = conv_layer(x, edge_index, batch) - skip_connections.append(x) - - # Skip-cat - x = torch.cat(skip_connections, dim=1) - - # Post-processing - x = self._post_processing(x) - - # (Optional) Global pooling - if self._global_pooling_schemes: - x = self._global_pooling(x, batch=batch) - if self._add_global_variables_after_pooling: - x = torch.cat( - [ - x, - global_variables, - ], - dim=1, - ) - - # Read-out - x = self._readout(x) - - return x
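# Example (illustrative sketch, not part of the original module): a toy
# forward pass. `n_pulses` must be set, since it enters the global-variable
# calculation above.
if __name__ == "__main__":
    _data = Data(
        x=torch.randn(10, 4),
        edge_index=torch.randint(0, 10, (2, 20)),
        batch=torch.zeros(10, dtype=torch.long),
    )
    _data.n_pulses = torch.tensor([10.0])
    _model = DynEdge(nb_inputs=4, global_pooling_schemes=["min", "max"])
    print(_model(_data).shape)  # torch.Size([1, 128])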
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/dynedge_jinst.html b/_modules/graphnet/models/gnn/dynedge_jinst.html
deleted file mode 100644
index 79fbe9a81..000000000
--- a/_modules/graphnet/models/gnn/dynedge_jinst.html
+++ /dev/null
@@ -1,519 +0,0 @@
Source code for graphnet.models.gnn.dynedge_jinst

-"""Implementation of the exact DynEdge architecture used in [2209.03042].
-
-Author: Rasmus Oersoe
-"""
-from typing import Optional
-
-import torch
-from torch import Tensor
-from torch_geometric.data import Data
-from torch_scatter import scatter_max, scatter_mean, scatter_min, scatter_sum
-
-from graphnet.models.components.layers import DynEdgeConv
-from graphnet.models.gnn.gnn import GNN
-from graphnet.models.utils import calculate_xyzt_homophily
-
-
-
class DynEdgeJINST(GNN):
    """DynEdge (dynamical edge convolutional) model used in [2209.03042]."""

    def __init__(
        self,
        nb_inputs: int,
        layer_size_scale: int = 4,
    ):
        """Construct `DynEdgeJINST`.

        Args:
            nb_inputs: Number of input features.
            layer_size_scale: Integer that scales the size of hidden layers.
        """
        # Architecture configuration
        c = layer_size_scale
        l1, l2, l3, l4, l5, l6 = (
            nb_inputs,
            c * 16 * 2,
            c * 32 * 2,
            c * 42 * 2,
            c * 32 * 2,
            c * 16 * 2,
        )

        # Base class constructor
        super().__init__(nb_inputs, l6)

        # Graph convolutional operations
        features_subset = slice(0, 3)
        nb_neighbors = 8

        self.conv_add1 = DynEdgeConv(
            torch.nn.Sequential(
                torch.nn.Linear(l1 * 2, l2),
                torch.nn.LeakyReLU(),
                torch.nn.Linear(l2, l3),
                torch.nn.LeakyReLU(),
            ),
            aggr="add",
            nb_neighbors=nb_neighbors,
            features_subset=features_subset,
        )

        self.conv_add2 = DynEdgeConv(
            torch.nn.Sequential(
                torch.nn.Linear(l3 * 2, l4),
                torch.nn.LeakyReLU(),
                torch.nn.Linear(l4, l3),
                torch.nn.LeakyReLU(),
            ),
            aggr="add",
            nb_neighbors=nb_neighbors,
            features_subset=features_subset,
        )

        self.conv_add3 = DynEdgeConv(
            torch.nn.Sequential(
                torch.nn.Linear(l3 * 2, l4),
                torch.nn.LeakyReLU(),
                torch.nn.Linear(l4, l3),
                torch.nn.LeakyReLU(),
            ),
            aggr="add",
            nb_neighbors=nb_neighbors,
            features_subset=features_subset,
        )

        self.conv_add4 = DynEdgeConv(
            torch.nn.Sequential(
                torch.nn.Linear(l3 * 2, l4),
                torch.nn.LeakyReLU(),
                torch.nn.Linear(l4, l3),
                torch.nn.LeakyReLU(),
            ),
            aggr="add",
            nb_neighbors=nb_neighbors,
            features_subset=features_subset,
        )

        # Post-processing operations
        self.nn1 = torch.nn.Linear(l3 * 4 + l1, l4)
        self.nn2 = torch.nn.Linear(l4, l5)
        self.nn3 = torch.nn.Linear(4 * l5 + 5, l6)
        self.lrelu = torch.nn.LeakyReLU()
-[docs] - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass.""" - # Convenience variables - x, edge_index, batch = data.x, data.edge_index, data.batch - - # Calculate homophily (scalar variables) - h_x, h_y, h_z, h_t = calculate_xyzt_homophily(x, edge_index, batch) - - a, edge_index = self.conv_add1(x, edge_index, batch) - b, edge_index = self.conv_add2(a, edge_index, batch) - c, edge_index = self.conv_add3(b, edge_index, batch) - d, edge_index = self.conv_add4(c, edge_index, batch) - - # Skip-cat - x = torch.cat((x, a, b, c, d), dim=1) - - # Post-processing - x = self.nn1(x) - x = self.lrelu(x) - x = self.nn2(x) - - # Aggregation across nodes - a, _ = scatter_max(x, batch, dim=0) - b, _ = scatter_min(x, batch, dim=0) - c = scatter_sum(x, batch, dim=0) - d = scatter_mean(x, batch, dim=0) - - # Concatenate aggregations and scalar features - x = torch.cat( - ( - a, - b, - c, - d, - h_t.reshape(-1, 1), - h_x.reshape(-1, 1), - h_y.reshape(-1, 1), - h_z.reshape(-1, 1), - data.n_pulses.reshape(-1, 1), - ), - dim=1, - ) - - # Read-out - x = self.lrelu(x) - x = self.nn3(x) - - x = self.lrelu(x) - - return x
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/dynedge_kaggle_tito.html b/_modules/graphnet/models/gnn/dynedge_kaggle_tito.html
deleted file mode 100644
index 99b914c05..000000000
--- a/_modules/graphnet/models/gnn/dynedge_kaggle_tito.html
+++ /dev/null
@@ -1,616 +0,0 @@
Source code for graphnet.models.gnn.dynedge_kaggle_tito

-"""Implementation of DynEdge architecture used in.
-
-                    IceCube - Neutrinos in Deep Ice
-Reconstruct the direction of neutrinos from the Universe to the South Pole
-
-Kaggle competition.
-
-Solution by TITO.
-"""
-
-from typing import List, Tuple, Optional
-
-import torch
-from torch import Tensor, LongTensor
-
-from torch_geometric.data import Data
-from torch_geometric.utils import to_dense_batch
-from torch_scatter import scatter_max, scatter_mean, scatter_min, scatter_sum
-
-from graphnet.models.components.layers import DynTrans
-from graphnet.models.gnn.gnn import GNN
-from graphnet.models.utils import calculate_xyzt_homophily
-
-GLOBAL_POOLINGS = {
-    "min": scatter_min,
-    "max": scatter_max,
-    "sum": scatter_sum,
-    "mean": scatter_mean,
-}
-
-
-
class DynEdgeTITO(GNN):
    """DynEdgeTITO (dynamical edge convolutional with transformer) model."""

    def __init__(
        self,
        nb_inputs: int,
        features_subset: slice = slice(0, 4),
        dyntrans_layer_sizes: Optional[List[Tuple[int, ...]]] = None,
        global_pooling_schemes: List[str] = ["max"],
    ):
        """Construct `DynEdgeTITO`.

        Args:
            nb_inputs: Number of input features on each node.
            features_subset: The subset of latent features on each node that
                are used as metric dimensions when performing the k-nearest
                neighbours clustering. Defaults to [0,1,2,3].
            dyntrans_layer_sizes: The layer sizes, or latent feature
                dimensions, used in the `DynTrans` layer.
            global_pooling_schemes: The list of global pooling schemes to use.
                Options are: "min", "max", "mean", and "sum".
        """
        # DynTrans layer sizes
        if dyntrans_layer_sizes is None:
            dyntrans_layer_sizes = [
                (256, 256),
                (256, 256),
                (256, 256),
            ]

        assert isinstance(dyntrans_layer_sizes, list)
        assert len(dyntrans_layer_sizes)
        assert all(isinstance(sizes, tuple) for sizes in dyntrans_layer_sizes)
        assert all(len(sizes) > 0 for sizes in dyntrans_layer_sizes)
        assert all(
            all(size > 0 for size in sizes) for sizes in dyntrans_layer_sizes
        )

        self._dyntrans_layer_sizes = dyntrans_layer_sizes

        # Post-processing layer sizes
        post_processing_layer_sizes = [
            336,
            256,
        ]

        self._post_processing_layer_sizes = post_processing_layer_sizes

        # Read-out layer sizes
        readout_layer_sizes = [
            256,
            128,
        ]

        self._readout_layer_sizes = readout_layer_sizes

        # Global pooling scheme(s)
        if isinstance(global_pooling_schemes, str):
            global_pooling_schemes = [global_pooling_schemes]

        if isinstance(global_pooling_schemes, list):
            for pooling_scheme in global_pooling_schemes:
                assert (
                    pooling_scheme in GLOBAL_POOLINGS
                ), f"Global pooling scheme {pooling_scheme} not supported."
        else:
            assert global_pooling_schemes is None

        self._global_pooling_schemes = global_pooling_schemes

        assert self._global_pooling_schemes, (
            "No global pooling schemes were requested, so cannot add global"
            " variables after pooling."
- ) - - # Base class constructor - super().__init__(nb_inputs, self._readout_layer_sizes[-1]) - - # Remaining member variables() - self._activation = torch.nn.LeakyReLU() - self._nb_inputs = nb_inputs - self._nb_global_variables = 5 + nb_inputs - self._features_subset = features_subset - self._construct_layers() - - def _construct_layers(self) -> None: - """Construct layers (torch.nn.Modules).""" - # Convolutional operations - nb_input_features = self._nb_inputs - - self._conv_layers = torch.nn.ModuleList() - nb_latent_features = nb_input_features - for sizes in self._dyntrans_layer_sizes: - conv_layer = DynTrans( - [nb_latent_features] + list(sizes), - aggr="max", - features_subset=self._features_subset, - n_head=8, - ) - self._conv_layers.append(conv_layer) - nb_latent_features = sizes[-1] - - post_processing_layers = [] - layer_sizes = [nb_latent_features] + list( - self._post_processing_layer_sizes - ) - for nb_in, nb_out in zip(layer_sizes[:-1], layer_sizes[1:]): - post_processing_layers.append(torch.nn.Linear(nb_in, nb_out)) - post_processing_layers.append(self._activation) - last_posting_layer_output_dim = nb_out - - self._post_processing = torch.nn.Sequential(*post_processing_layers) - - # Read-out operations - nb_poolings = ( - len(self._global_pooling_schemes) - if self._global_pooling_schemes - else 1 - ) - nb_latent_features = last_posting_layer_output_dim * nb_poolings - nb_latent_features += self._nb_global_variables - - readout_layers = [] - layer_sizes = [nb_latent_features] + list(self._readout_layer_sizes) - for nb_in, nb_out in zip(layer_sizes[:-1], layer_sizes[1:]): - readout_layers.append(torch.nn.Linear(nb_in, nb_out)) - readout_layers.append(self._activation) - - self._readout = torch.nn.Sequential(*readout_layers) - - def _global_pooling(self, x: Tensor, batch: LongTensor) -> Tensor: - """Perform global pooling.""" - assert self._global_pooling_schemes - pooled = [] - for pooling_scheme in self._global_pooling_schemes: - pooling_fn = GLOBAL_POOLINGS[pooling_scheme] - pooled_x = pooling_fn(x, index=batch, dim=0) - if isinstance(pooled_x, tuple) and len(pooled_x) == 2: - # `scatter_{min,max}`, which return also an argument, vs. - # `scatter_{mean,sum}` - pooled_x, _ = pooled_x - pooled.append(pooled_x) - - return torch.cat(pooled, dim=1) - - def _calculate_global_variables( - self, - x: Tensor, - edge_index: LongTensor, - batch: LongTensor, - *additional_attributes: Tensor, - ) -> Tensor: - """Calculate global variables.""" - # Calculate homophily (scalar variables) - h_x, h_y, h_z, h_t = calculate_xyzt_homophily(x, edge_index, batch) - - # Calculate mean features - global_means = scatter_mean(x, batch, dim=0) - - # Add global variables - global_variables = torch.cat( - [ - global_means, - h_x, - h_y, - h_z, - h_t, - ] - + [attr.unsqueeze(dim=1) for attr in additional_attributes], - dim=1, - ) - - return global_variables - -
-[docs] - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass.""" - # Convenience variables - x, edge_index, batch = data.x, data.edge_index, data.batch - - global_variables = self._calculate_global_variables( - x, - edge_index, - batch, - torch.log10(data.n_pulses), - ) - - # DynEdge-convolutions - for conv_layer in self._conv_layers: - x = conv_layer(x, edge_index, batch) - - x, mask = to_dense_batch(x, batch) - x = x[mask] - - # Post-processing - x = self._post_processing(x) - - # (Optional) Global pooling - x = self._global_pooling(x, batch=batch) - x = torch.cat( - [ - x, - global_variables, - ], - dim=1, - ) - - # Read-out - x = self._readout(x) - - return x
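# Example (hypothetical usage sketch; assumes graphnet, torch_geometric and
# torch_scatter are installed; pulse and feature counts are illustrative):
import torch
from torch_geometric.data import Data
from torch_geometric.nn import knn_graph

model = DynEdgeTITO(nb_inputs=7, global_pooling_schemes=["max", "mean"])

x = torch.randn(100, 7)                          # 100 pulses, 7 features each
data = Data(x=x, edge_index=knn_graph(x[:, :3], k=8))
data.batch = torch.zeros(100, dtype=torch.long)  # a single event
data.n_pulses = torch.tensor([100])              # used for global variables

embedding = model(data)                          # shape [1, 128]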
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/gnn.html b/_modules/graphnet/models/gnn/gnn.html
deleted file mode 100644
index 1e183aaa6..000000000
--- a/_modules/graphnet/models/gnn/gnn.html
+++ /dev/null
@@ -1,401 +0,0 @@

Source code for graphnet.models.gnn.gnn

-"""Base GNN-specific `Model` class(es)."""
-
-from abc import abstractmethod
-
-from torch import Tensor
-from torch_geometric.data import Data
-
-from graphnet.models import Model
-
-
-
-[docs] -class GNN(Model): - """Base class for all core GNN models in graphnet.""" - - def __init__(self, nb_inputs: int, nb_outputs: int) -> None: - """Construct `GNN`.""" - # Base class constructor - super().__init__() - - # Member variables - self._nb_inputs = nb_inputs - self._nb_outputs = nb_outputs - - @property - def nb_inputs(self) -> int: - """Return number of input features.""" - return self._nb_inputs - - @property - def nb_outputs(self) -> int: - """Return number of output features.""" - return self._nb_outputs - -
-[docs] - @abstractmethod - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass in model."""
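# Example (illustrative subclass, not part of graphnet) showing how a
# concrete architecture fills in this interface; only `forward` is required:
import torch
from torch_geometric.nn import global_mean_pool


class TinyGNN(GNN):
    """Toy architecture: one linear layer followed by mean pooling."""

    def __init__(self, nb_inputs: int, nb_outputs: int) -> None:
        super().__init__(nb_inputs, nb_outputs)
        self._linear = torch.nn.Linear(nb_inputs, nb_outputs)

    def forward(self, data: Data) -> Tensor:
        x = self._linear(data.x)                # per-node transformation
        return global_mean_pool(x, data.batch)  # one row per event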
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/edges/edges.html b/_modules/graphnet/models/graphs/edges/edges.html
deleted file mode 100644
index 399251ee4..000000000
--- a/_modules/graphnet/models/graphs/edges/edges.html
+++ /dev/null
@@ -1,559 +0,0 @@

Source code for graphnet.models.graphs.edges.edges

-"""Class(es) for building/connecting graphs."""
-
-from typing import List
-from abc import abstractmethod, ABC
-
-import torch
-from torch_geometric.nn import knn_graph, radius_graph
-from torch_geometric.data import Data
-
-from graphnet.models.utils import calculate_distance_matrix
-from graphnet.models import Model
-
-
-
-[docs] -class EdgeDefinition(Model): # pylint: disable=too-few-public-methods - """Base class for graph building.""" - -
-    def forward(self, graph: Data) -> Data:
-        """Construct edges via the problem-specific ´_construct_edges´.
-
-        Args:
-            graph: a graph without edges
-
-        Returns:
-            graph: a graph with edges
-        """
-        if graph.edge_index is not None:
-            self.warning_once(
-                "GraphBuilder received graph with pre-existing "
-                "structure. Will overwrite."
-            )
-        return self._construct_edges(graph)
- - - @abstractmethod - def _construct_edges(self, graph: Data) -> Data: - """Construct edges and assign them to graph. I.e. ´graph.edge_index = edge_index´. - - Args: - graph: graph without edges - - Returns: - graph: graph with edges assigned. - """
- - - -
-[docs] -class KNNEdges(EdgeDefinition): # pylint: disable=too-few-public-methods - """Builds edges from the k-nearest neighbours.""" - - def __init__( - self, - nb_nearest_neighbours: int, - columns: List[int] = [0, 1, 2], - ): - """K-NN Edge definition. - - Will connect nodes together with their ´nb_nearest_neighbours´ - nearest neighbours in the feature space given by ´columns´. - - Args: - nb_nearest_neighbours: number of neighbours. - columns: Node features to use for distance calculation. - Defaults to [0,1,2]. - """ - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - # Member variable(s) - self._nb_nearest_neighbours = nb_nearest_neighbours - self._columns = columns - - def _construct_edges(self, graph: Data) -> Data: - """Define K-NN edges.""" - graph.edge_index = knn_graph( - graph.x[:, self._columns], - self._nb_nearest_neighbours, - graph.batch, - ).to(self.device) - - return graph
- - - -
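# Example (illustrative usage sketch; shapes are arbitrary). `KNNEdges`
# fills in `graph.edge_index` based on the chosen feature columns:
import torch
from torch_geometric.data import Data

edge_definition = KNNEdges(nb_nearest_neighbours=8, columns=[0, 1, 2])
graph = Data(x=torch.randn(50, 7))  # 50 pulses; xyz in columns 0-2
graph.batch = torch.zeros(50, dtype=torch.long)
graph = edge_definition(graph)      # graph.edge_index now has shape [2, E]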
-[docs] -class RadialEdges(EdgeDefinition): - """Builds graph from a sphere of chosen radius centred at each node.""" - - def __init__( - self, - radius: float, - columns: List[int] = [0, 1, 2], - ): - """Radial edges. - - Connects each node to other nodes that are within a sphere of - radius ´r´ centered at the node. The feature space of ´r´ is defined - by ´columns´ - - Args: - radius: radius of sphere - columns: columns of the node feature matrix used. - Defaults to [0,1,2]. - """ - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - # Member variable(s) - self._radius = radius - self._columns = columns - - def _construct_edges(self, graph: Data) -> Data: - """Define radial edges.""" - graph.edge_index = radius_graph( - graph.x[:, self._columns], - self._radius, - graph.batch, - ).to(self.device) - - return graph
- - - -
-[docs] -class EuclideanEdges(EdgeDefinition): # pylint: disable=too-few-public-methods - """Builds edges according to Euclidean distance between nodes. - - See https://arxiv.org/pdf/1809.06166.pdf. - """ - - def __init__( - self, - sigma: float, - threshold: float = 0.0, - columns: List[int] = None, - ): - """Construct `EuclideanEdges`.""" - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - # Check(s) - if columns is None: - columns = [0, 1, 2] - - # Member variable(s) - self._sigma = sigma - self._threshold = threshold - self._columns = columns - - def _construct_edges(self, graph: Data) -> Data: - """Forward pass.""" - # Constructs the adjacency matrix from the raw, DOM-level data and - # returns this matrix - if graph.edge_index is not None: - self.info( - "WARNING: GraphBuilder received graph with pre-existing " - "structure. Will overwrite." - ) - - xyz_coords = graph.x[:, self._columns] - - # Construct block-diagonal matrix indicating whether pulses belong to - # the same event in the batch - batch_mask = graph.batch.unsqueeze(dim=0) == graph.batch.unsqueeze( - dim=1 - ) - - distance_matrix = calculate_distance_matrix(xyz_coords) - affinity_matrix = torch.exp( - -0.5 * distance_matrix**2 / self._sigma**2 - ) - - # Use softmax to normalise all adjacencies to one for each node - exp_row_sums = torch.exp(affinity_matrix).sum(axis=1) - weighted_adj_matrix = torch.exp( - affinity_matrix - ) / exp_row_sums.unsqueeze(dim=1) - - # Only include edges with weights that exceed the chosen threshold (and - # are part of the same event) - sources, targets = torch.where( - (weighted_adj_matrix > self._threshold) & (batch_mask) - ) - edge_weights = weighted_adj_matrix[sources, targets] - - graph.edge_index = torch.stack((sources, targets)) - graph.edge_weight = edge_weights - - return graph
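# Example (self-contained illustration, assuming only torch): the weighting
# in `EuclideanEdges` amounts to a row-wise softmax of the Gaussian affinity
# matrix.
import torch

distances = torch.tensor([[0.0, 1.0], [1.0, 0.0]])    # pairwise distances
sigma = 1.0
affinity = torch.exp(-0.5 * distances**2 / sigma**2)  # Gaussian kernel
weights = torch.softmax(affinity, dim=1)              # each row sums to one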
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/graph_definition.html b/_modules/graphnet/models/graphs/graph_definition.html
deleted file mode 100644
index 3841cc045..000000000
--- a/_modules/graphnet/models/graphs/graph_definition.html
+++ /dev/null
@@ -1,690 +0,0 @@

Source code for graphnet.models.graphs.graph_definition

-"""Modules for defining graphs.
-
-These are self-contained graph definitions that hold all the graph-altering
-code in graphnet. These modules define what the GNN sees as input and can be
-passed to dataloaders during training and deployment.
-"""
-
-
-from typing import Any, List, Optional, Dict, Callable, Union
-import torch
-from torch_geometric.data import Data
-import numpy as np
-from numpy.random import default_rng, Generator
-
-from graphnet.models.detector import Detector
-from .edges import EdgeDefinition
-from .nodes import NodeDefinition, NodesAsPulses
-from graphnet.models import Model
-
-
-
-class GraphDefinition(Model):
-    """An abstract class to create graph definitions from."""
-
-    def __init__(
-        self,
-        detector: Detector,
-        node_definition: NodeDefinition = NodesAsPulses(),
-        edge_definition: Optional[EdgeDefinition] = None,
-        node_feature_names: Optional[List[str]] = None,
-        dtype: Optional[torch.dtype] = torch.float,
-        perturbation_dict: Optional[Dict[str, float]] = None,
-        seed: Optional[Union[int, Generator]] = None,
-    ):
-        """Construct ´GraphDefinition´.
-
-        The ´detector´ holds ´Detector´-specific code, e.g. scaling/
-        standardization and geometry tables.
-
-        ´node_definition´ defines the nodes in the graph.
-
-        ´edge_definition´ defines the connectivity of the nodes in the graph.
-
-        Args:
-            detector: The corresponding ´Detector´ representing the data.
-            node_definition: Definition of nodes. Defaults to NodesAsPulses.
-            edge_definition: Definition of edges. Defaults to None.
-            node_feature_names: Names of node feature columns.
-                Defaults to None.
-            dtype: data type used for node features, e.g. ´torch.float´.
-            perturbation_dict: Dictionary mapping a feature name to a standard
-                deviation according to which the values for this
-                feature should be randomly perturbed. Defaults
-                to None.
-            seed: seed or Generator used to randomly sample perturbations.
-                Defaults to None.
-        """
-        # Base class constructor
-        super().__init__(name=__name__, class_name=self.__class__.__name__)
-
-        # Member Variables
-        self._detector = detector
-        self._edge_definition = edge_definition
-        self._node_definition = node_definition
-        self._perturbation_dict = perturbation_dict
-
-        if node_feature_names is None:
-            # Assume all features in Detector are used.
-            node_feature_names = list(self._detector.feature_map().keys())  # type: ignore
-        self._node_feature_names = node_feature_names
-
-        # Set data type
-        self.to(dtype)
-
-        # Set Input / Output dimensions
-        self._node_definition.set_number_of_inputs(
-            node_feature_names=node_feature_names
-        )
-        self.nb_inputs = len(self._node_feature_names)
-        self.nb_outputs = self._node_definition.nb_outputs
-
-        # Set perturbation_cols if needed
-        if isinstance(self._perturbation_dict, dict):
-            self._perturbation_cols = [
-                self._node_feature_names.index(key)
-                for key in self._perturbation_dict.keys()
-            ]
-        if seed is not None:
-            if isinstance(seed, int):
-                self.rng = default_rng(seed)
-            elif isinstance(seed, Generator):
-                self.rng = seed
-            else:
-                raise ValueError(
-                    "Invalid seed. Must be an int or a numpy Generator."
-                )
-        else:
-            self.rng = default_rng()
-[docs] - def forward( # type: ignore - self, - node_features: np.ndarray, - node_feature_names: List[str], - truth_dicts: Optional[List[Dict[str, Any]]] = None, - custom_label_functions: Optional[Dict[str, Callable[..., Any]]] = None, - loss_weight_column: Optional[str] = None, - loss_weight: Optional[float] = None, - loss_weight_default_value: Optional[float] = None, - data_path: Optional[str] = None, - ) -> Data: - """Construct graph as ´Data´ object. - - Args: - node_features: node features for graph. Shape ´[num_nodes, d]´ - node_feature_names: name of each column. Shape ´[,d]´. - truth_dicts: Dictionary containing truth labels. - custom_label_functions: Custom label functions. See https://github.com/graphnet-team/graphnet/blob/main/GETTING_STARTED.md#adding-custom-truth-labels. - loss_weight_column: Name of column that holds loss weight. - Defaults to None. - loss_weight: Loss weight associated with event. Defaults to None. - loss_weight_default_value: default value for loss weight. - Used in instances where some events have - no pre-defined loss weight. Defaults to None. - data_path: Path to dataset data files. Defaults to None. - - Returns: - graph - """ - # Checks - self._validate_input( - node_features=node_features, node_feature_names=node_feature_names - ) - - # Gaussian perturbation of each column if perturbation dict is given - node_features = self._perturb_input(node_features) - - # Transform to pytorch tensor - node_features = torch.tensor(node_features, dtype=self.dtype) - - # Standardize / Scale node features - node_features = self._detector(node_features, node_feature_names) - - # Create graph - graph = self._node_definition(node_features) - - # Attach number of pulses as static attribute. - graph.n_pulses = torch.tensor(len(node_features), dtype=torch.int32) - - # Assign edges - if self._edge_definition is not None: - graph = self._edge_definition(graph) - else: - - self.warning_once( - """No EdgeDefinition provided. - Graphs will not have edges defined!""" # noqa - ) - - # Attach data path - useful for Ensemble datasets. - if data_path is not None: - graph["dataset_path"] = data_path - - # Attach loss weights if they exist - graph = self._add_loss_weights( - graph=graph, - loss_weight=loss_weight, - loss_weight_column=loss_weight_column, - loss_weight_default_value=loss_weight_default_value, - ) - - # Attach default truth labels and node truths - if truth_dicts is not None: - graph = self._add_truth(graph=graph, truth_dicts=truth_dicts) - - # Attach custom truth labels - if custom_label_functions is not None: - graph = self._add_custom_labels( - graph=graph, custom_label_functions=custom_label_functions - ) - - # Attach node features as seperate fields. MAY NOT CONTAIN 'x' - graph = self._add_features_individually( - graph=graph, node_feature_names=node_feature_names - ) - - # Add GraphDefinition Stamp - graph["graph_definition"] = self.__class__.__name__ - return graph
-
-    def _validate_input(
-        self, node_features: np.ndarray, node_feature_names: List[str]
-    ) -> None:
-        # Node feature matrix dimension check
-        assert node_features.shape[1] == len(node_feature_names)
-
-        # Check that the features provided as input are the same as those
-        # that the ´Graph´ was instantiated with.
-        assert len(node_feature_names) == len(
-            self._node_feature_names
-        ), f"""Input features ({node_feature_names}) are not what
-               {self.__class__.__name__} was instantiated
-               with ({self._node_feature_names})"""  # noqa
-        for idx in range(len(node_feature_names)):
-            assert (
-                node_feature_names[idx] == self._node_feature_names[idx]
-            ), f"""Order of node features in data
-                is not the same as expected. Got {node_feature_names}
-                vs. {self._node_feature_names}"""  # noqa
-
-    def _perturb_input(self, node_features: np.ndarray) -> np.ndarray:
-        if isinstance(self._perturbation_dict, dict):
-            self.warning_once(
-                f"""Will randomly perturb
-                {list(self._perturbation_dict.keys())}
-                using stds {self._perturbation_dict.values()}"""  # noqa
-            )
-            perturbed_features = self.rng.normal(
-                loc=node_features[:, self._perturbation_cols],
-                scale=np.array(
-                    list(self._perturbation_dict.values()), dtype=float
-                ),
-            )
-            node_features[:, self._perturbation_cols] = perturbed_features
-        return node_features
-
-    def _add_loss_weights(
-        self,
-        graph: Data,
-        loss_weight_column: Optional[str] = None,
-        loss_weight: Optional[float] = None,
-        loss_weight_default_value: Optional[float] = None,
-    ) -> Data:
-        """Attempt to store a loss weight in the graph for use during training.
-
-        I.e. `graph[loss_weight_column] = loss_weight`
-
-        Args:
-            loss_weight: The non-negative weight to be stored.
-            graph: Data object representing the event.
-            loss_weight_column: The name under which the weight is stored in
-                the graph.
-            loss_weight_default_value: The default value used if
-                none was retrieved.
-
-        Returns:
-            A graph with loss weight added, if available.
-        """
-        # Add loss weight to graph.
-        if loss_weight is not None and loss_weight_column is not None:
-            if loss_weight < 0:
-                # No loss weight was retrieved, i.e., it is missing for the
-                # current event.
-                if loss_weight_default_value is None:
-                    raise ValueError(
-                        "At least one event is missing an entry in "
-                        f"{loss_weight_column} "
-                        "but loss_weight_default_value is None."
-                    )
-                graph[loss_weight_column] = torch.tensor(
-                    loss_weight_default_value, dtype=self.dtype
-                ).reshape(-1, 1)
-            else:
-                graph[loss_weight_column] = torch.tensor(
-                    loss_weight, dtype=self.dtype
-                ).reshape(-1, 1)
-        return graph
-
-    def _add_truth(
-        self, graph: Data, truth_dicts: List[Dict[str, Any]]
-    ) -> Data:
-        """Add truth labels from ´truth_dicts´ to ´graph´.
-
-        I.e. ´graph[key] = truth_dict[key]´
-
-        Args:
-            graph: graph where the label will be stored
-            truth_dicts: dictionary containing the labels
-
-        Returns:
-            graph with labels
-        """
-        # Write attributes, either target labels, truth info or original
-        # features.
-        for truth_dict in truth_dicts:
-            for key, value in truth_dict.items():
-                try:
-                    graph[key] = torch.tensor(value)
-                except TypeError:
-                    # Cannot convert `value` to Tensor due to its data type,
-                    # e.g. `str`.
-                    self.debug(
-                        (
-                            f"Could not assign `{key}` with type "
-                            f"'{type(value).__name__}' as attribute to graph."
- ) - ) - return graph - - def _add_features_individually( - self, - graph: Data, - node_feature_names: List[str], - ) -> Data: - # Additionally add original features as (static) attributes - graph.features = node_feature_names - for index, feature in enumerate(node_feature_names): - if feature not in ["x"]: # reserved for node features. - graph[feature] = graph.x[:, index].detach() - else: - self.warning_once( - """Cannot assign graph['x']. This field is reserved for - node features. Please rename your input feature.""" - ) # noqa - - return graph - - def _add_custom_labels( - self, - graph: Data, - custom_label_functions: Dict[str, Callable[..., Any]], - ) -> Data: - # Add custom labels to the graph - for key, fn in custom_label_functions.items(): - graph[key] = fn(graph) - return graph
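# Example (illustrative sketch, assuming only numpy) of the perturbation
# performed in `_perturb_input` above; "dom_x" is a hypothetical feature name:
import numpy as np
from numpy.random import default_rng

rng = default_rng(42)
node_features = np.array([[0.0, 10.0], [1.0, 20.0]])
perturbation_dict = {"dom_x": 0.5}  # perturb column 0 with sigma = 0.5
perturbation_cols = [0]
node_features[:, perturbation_cols] = rng.normal(
    loc=node_features[:, perturbation_cols],
    scale=np.array(list(perturbation_dict.values()), dtype=float),
)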
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/graphs.html b/_modules/graphnet/models/graphs/graphs.html
deleted file mode 100644
index 2e0d1f9f9..000000000
--- a/_modules/graphnet/models/graphs/graphs.html
+++ /dev/null
@@ -1,419 +0,0 @@

Source code for graphnet.models.graphs.graphs

-"""A module containing different graph representations in GraphNeT."""
-
-from typing import List, Optional, Dict, Union
-import torch
-from numpy.random import Generator
-
-from .graph_definition import GraphDefinition
-from graphnet.models.detector import Detector
-from graphnet.models.graphs.edges import EdgeDefinition, KNNEdges
-from graphnet.models.graphs.nodes import NodeDefinition, NodesAsPulses
-
-
-
-[docs] -class KNNGraph(GraphDefinition): - """A Graph representation where Edges are drawn to nearest neighbours.""" - - def __init__( - self, - detector: Detector, - node_definition: NodeDefinition = NodesAsPulses(), - node_feature_names: Optional[List[str]] = None, - dtype: Optional[torch.dtype] = torch.float, - perturbation_dict: Optional[Dict[str, float]] = None, - seed: Optional[Union[int, Generator]] = None, - nb_nearest_neighbours: int = 8, - columns: List[int] = [0, 1, 2], - ) -> None: - """Construct k-nn graph representation. - - Args: - detector: Detector that represents your data. - node_definition: Definition of nodes in the graph. - node_feature_names: Name of node features. - dtype: data type for node features. - perturbation_dict: Dictionary mapping a feature name to a standard - deviation according to which the values for this - feature should be randomly perturbed. Defaults - to None. - seed: seed or Generator used to randomly sample perturbations. - Defaults to None. - nb_nearest_neighbours: Number of edges for each node. Defaults to 8. - columns: node feature columns used for distance calculation - . Defaults to [0, 1, 2]. - """ - # Base class constructor - super().__init__( - detector=detector, - node_definition=node_definition, - edge_definition=KNNEdges( - nb_nearest_neighbours=nb_nearest_neighbours, - columns=columns, - ), - dtype=dtype, - node_feature_names=node_feature_names, - perturbation_dict=perturbation_dict, - seed=seed, - )
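# Example (hypothetical end-to-end sketch; assumes graphnet is installed,
# and uses the `Prometheus` detector and private attribute access purely
# for illustration):
import numpy as np
from graphnet.models.detector.prometheus import Prometheus

graph_definition = KNNGraph(detector=Prometheus(), nb_nearest_neighbours=8)
features = graph_definition._node_feature_names    # detector feature names
node_features = np.random.rand(10, len(features))  # 10 pulses
graph = graph_definition(node_features, features)  # `Data` object with edges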
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/nodes/nodes.html b/_modules/graphnet/models/graphs/nodes/nodes.html
deleted file mode 100644
index 14aecf400..000000000
--- a/_modules/graphnet/models/graphs/nodes/nodes.html
+++ /dev/null
@@ -1,442 +0,0 @@

Source code for graphnet.models.graphs.nodes.nodes

-"""Class(es) for building/connecting graphs."""
-
-from typing import List
-from abc import abstractmethod
-
-import torch
-from torch_geometric.data import Data
-
-from graphnet.utilities.decorators import final
-from graphnet.models import Model
-
-
-
-class NodeDefinition(Model):  # pylint: disable=too-few-public-methods
-    """Base class for graph building."""
-
-    def __init__(self) -> None:
-        """Construct `NodeDefinition`."""
-        # Base class constructor
-        super().__init__(name=__name__, class_name=self.__class__.__name__)
-
-[docs] - @final - def forward(self, x: torch.tensor) -> Data: - """Construct nodes from raw node features. - - Args: - x: standardized node features with shape ´[num_pulses, d]´, - where ´d´ is the number of node features. - - Returns: - graph: a graph without edges - """ - graph = self._construct_nodes(x) - return graph
-
-    @property
-    def nb_outputs(self) -> int:
-        """Return number of output features.
-
-        This is the default, but may be overridden by specific inheriting
-        classes.
-        """
-        return self.nb_inputs
-
-[docs] - @final - def set_number_of_inputs(self, node_feature_names: List[str]) -> None: - """Return number of inputs expected by node definition. - - Args: - node_feature_names: name of each node feature column. - """ - assert isinstance(node_feature_names, list) - self.nb_inputs = len(node_feature_names)
- - - @abstractmethod - def _construct_nodes(self, x: torch.tensor) -> Data: - """Construct nodes from raw node features ´x´. - - Args: - x: standardized node features with shape ´[num_pulses, d]´, - where ´d´ is the number of node features. - - Returns: - graph: graph without edges. - """
- - - -
-[docs] -class NodesAsPulses(NodeDefinition): - """Represent each measured pulse of Cherenkov Radiation as a node.""" - - def _construct_nodes(self, x: torch.Tensor) -> Data: - return Data(x=x)
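# Example (illustrative subclass, not part of graphnet); only
# `_construct_nodes` must be implemented, and the charge column index is an
# assumption:
class NodesAboveChargeCut(NodeDefinition):
    """Keep only pulses whose charge (assumed in column 3) exceeds a cut."""

    def __init__(self, min_charge: float = 0.25) -> None:
        super().__init__()
        self._min_charge = min_charge

    def _construct_nodes(self, x: torch.Tensor) -> Data:
        return Data(x=x[x[:, 3] > self._min_charge])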
\ No newline at end of file
diff --git a/_modules/graphnet/models/model.html b/_modules/graphnet/models/model.html
deleted file mode 100644
index 2d895f29e..000000000
--- a/_modules/graphnet/models/model.html
+++ /dev/null
@@ -1,732 +0,0 @@

Source code for graphnet.models.model

-"""Base class(es) for building models."""
-
-from abc import ABC, abstractmethod
-from collections import OrderedDict
-import dill
-import os.path
-from typing import Any, Dict, List, Optional, Union
-
-import numpy as np
-import pandas as pd
-from pytorch_lightning import Trainer, LightningModule
-from pytorch_lightning.callbacks.callback import Callback
-from pytorch_lightning.callbacks import EarlyStopping
-from pytorch_lightning.loggers.logger import Logger as LightningLogger
-import torch
-from torch import Tensor
-from torch.utils.data import DataLoader, SequentialSampler
-from torch_geometric.data import Data
-
-from graphnet.utilities.logging import Logger
-from graphnet.utilities.config import (
-    Configurable,
-    ModelConfig,
-    ModelConfigSaverABC,
-)
-from graphnet.training.callbacks import ProgressBar
-
-
-
-[docs] -class Model( - Logger, Configurable, LightningModule, ABC, metaclass=ModelConfigSaverABC -): - """Base class for all models in graphnet.""" - -
-[docs] - @abstractmethod - def forward(self, x: Union[Tensor, Data]) -> Union[Tensor, Data]: - """Forward pass."""
- - - @staticmethod - def _construct_trainer( - max_epochs: int = 10, - gpus: Optional[Union[List[int], int]] = None, - callbacks: Optional[List[Callback]] = None, - ckpt_path: Optional[str] = None, - logger: Optional[LightningLogger] = None, - log_every_n_steps: int = 1, - gradient_clip_val: Optional[float] = None, - distribution_strategy: Optional[str] = "ddp", - **trainer_kwargs: Any, - ) -> Trainer: - - if gpus: - accelerator = "gpu" - devices = gpus - else: - accelerator = "cpu" - devices = 1 - - trainer = Trainer( - accelerator=accelerator, - devices=devices, - max_epochs=max_epochs, - callbacks=callbacks, - log_every_n_steps=log_every_n_steps, - logger=logger, - gradient_clip_val=gradient_clip_val, - strategy=distribution_strategy, - default_root_dir=ckpt_path, - **trainer_kwargs, - ) - - return trainer - -
-[docs] - def fit( - self, - train_dataloader: DataLoader, - val_dataloader: Optional[DataLoader] = None, - *, - max_epochs: int = 10, - gpus: Optional[Union[List[int], int]] = None, - callbacks: Optional[List[Callback]] = None, - ckpt_path: Optional[str] = None, - logger: Optional[LightningLogger] = None, - log_every_n_steps: int = 1, - gradient_clip_val: Optional[float] = None, - distribution_strategy: Optional[str] = "ddp", - **trainer_kwargs: Any, - ) -> None: - """Fit `Model` using `pytorch_lightning.Trainer`.""" - # Checks - if callbacks is None: - callbacks = self._create_default_callbacks( - val_dataloader=val_dataloader, - ) - elif val_dataloader is not None: - callbacks = self._add_early_stopping( - val_dataloader=val_dataloader, callbacks=callbacks - ) - - self.train(mode=True) - trainer = self._construct_trainer( - max_epochs=max_epochs, - gpus=gpus, - callbacks=callbacks, - ckpt_path=ckpt_path, - logger=logger, - log_every_n_steps=log_every_n_steps, - gradient_clip_val=gradient_clip_val, - distribution_strategy=distribution_strategy, - **trainer_kwargs, - ) - - try: - trainer.fit( - self, train_dataloader, val_dataloader, ckpt_path=ckpt_path - ) - except KeyboardInterrupt: - self.warning("[ctrl+c] Exiting gracefully.") - pass
- - - def _create_default_callbacks(self, val_dataloader: DataLoader) -> List: - callbacks = [ProgressBar()] - callbacks = self._add_early_stopping( - val_dataloader=val_dataloader, callbacks=callbacks - ) - return callbacks - - def _add_early_stopping( - self, val_dataloader: DataLoader, callbacks: List - ) -> List: - if val_dataloader is None: - return callbacks - has_early_stopping = False - assert isinstance(callbacks, list) - for callback in callbacks: - if isinstance(callback, EarlyStopping): - has_early_stopping = True - - if not has_early_stopping: - callbacks.append( - EarlyStopping( - monitor="val_loss", - patience=5, - ) - ) - self.warning_once( - "Got validation dataloader but no EarlyStopping callback. An " - "EarlyStopping callback has been added automatically with " - "patience=5 and monitor = 'val_loss'." - ) - return callbacks - -
-[docs] - def predict( - self, - dataloader: DataLoader, - gpus: Optional[Union[List[int], int]] = None, - distribution_strategy: Optional[str] = "auto", - ) -> List[Tensor]: - """Return predictions for `dataloader`. - - Returns a list of Tensors, one for each model output. - """ - self.train(mode=False) - - callbacks = self._create_default_callbacks( - val_dataloader=None, - ) - - inference_trainer = self._construct_trainer( - gpus=gpus, - distribution_strategy=distribution_strategy, - callbacks=callbacks, - ) - - predictions_list = inference_trainer.predict(self, dataloader) - assert len(predictions_list), "Got no predictions" - - nb_outputs = len(predictions_list[0]) - predictions: List[Tensor] = [ - torch.cat([preds[ix] for preds in predictions_list], dim=0) - for ix in range(nb_outputs) - ] - - return predictions
- - -
-[docs] - def predict_as_dataframe( - self, - dataloader: DataLoader, - prediction_columns: List[str], - *, - additional_attributes: Optional[List[str]] = None, - gpus: Optional[Union[List[int], int]] = None, - distribution_strategy: Optional[str] = "auto", - ) -> pd.DataFrame: - """Return predictions for `dataloader` as a DataFrame. - - Include `additional_attributes` as additional columns in the output - DataFrame. - """ - # Check(s) - if additional_attributes is None: - additional_attributes = [] - assert isinstance(additional_attributes, list) - - if ( - not isinstance(dataloader.sampler, SequentialSampler) - and additional_attributes - ): - print(dataloader.sampler) - raise UserWarning( - "DataLoader has a `sampler` that is not `SequentialSampler`, " - "indicating that shuffling is enabled. Using " - "`predict_as_dataframe` with `additional_attributes` assumes " - "that the sequence of batches in `dataloader` are " - "deterministic. Either call this method a `dataloader` which " - "doesn't resample batches; or do not request " - "`additional_attributes`." - ) - self.info(f"Column names for predictions are: \n {prediction_columns}") - predictions_torch = self.predict( - dataloader=dataloader, - gpus=gpus, - distribution_strategy=distribution_strategy, - ) - predictions = ( - torch.cat(predictions_torch, dim=1).detach().cpu().numpy() - ) - assert len(prediction_columns) == predictions.shape[1], ( - f"Number of provided column names ({len(prediction_columns)}) and " - f"number of output columns ({predictions.shape[1]}) don't match." - ) - - # Get additional attributes - attributes: Dict[str, List[np.ndarray]] = OrderedDict( - [(attr, []) for attr in additional_attributes] - ) - - for batch in dataloader: - for attr in attributes: - attribute = batch[attr] - if isinstance(attribute, torch.Tensor): - attribute = attribute.detach().cpu().numpy() - - # Check if node level predictions - # If true, additional attributes are repeated - # to make dimensions fit - if len(predictions) != len(dataloader.dataset): - if len(attribute) < np.sum( - batch.n_pulses.detach().cpu().numpy() - ): - attribute = np.repeat( - attribute, batch.n_pulses.detach().cpu().numpy() - ) - try: - assert len(attribute) == len(batch.x) - except AssertionError: - self.warning_once( - "Could not automatically adjust length" - f"of additional attribute {attr} to match length of" - f"predictions. Make sure {attr} is a graph-level or" - "node-level attribute. Attribute skipped." - ) - pass - attributes[attr].extend(attribute) - - data = np.concatenate( - [predictions] - + [ - np.asarray(values)[:, np.newaxis] - for values in attributes.values() - ], - axis=1, - ) - - results = pd.DataFrame( - data, columns=prediction_columns + additional_attributes - ) - return results
- - -
-[docs] - def save(self, path: str) -> None: - """Save entire model to `path`.""" - if not path.endswith(".pth"): - self.info( - "It is recommended to use the .pth suffix for model files." - ) - dirname = os.path.dirname(path) - if dirname: - os.makedirs(dirname, exist_ok=True) - torch.save(self.cpu(), path, pickle_module=dill) - self.info(f"Model saved to {path}")
- - -
-[docs] - @classmethod - def load(cls, path: str) -> "Model": - """Load entire model from `path`.""" - return torch.load(path, pickle_module=dill)
- - -
-[docs] - def save_state_dict(self, path: str) -> None: - """Save model `state_dict` to `path`.""" - if not path.endswith(".pth"): - self.info( - "It is recommended to use the .pth suffix for state_dict files." - ) - torch.save(self.cpu().state_dict(), path) - self.info(f"Model state_dict saved to {path}")
- - -
-[docs] - def load_state_dict( - self, path: Union[str, Dict], **kargs: Optional[Any] - ) -> "Model": # pylint: disable=arguments-differ - """Load model `state_dict` from `path`.""" - if isinstance(path, str): - state_dict = torch.load(path) - else: - state_dict = path - return super().load_state_dict(state_dict, **kargs)
- - -
-[docs] - @classmethod - def from_config( # type: ignore[override] - cls, - source: Union[ModelConfig, str], - trust: bool = False, - load_modules: Optional[List[str]] = None, - ) -> "Model": - """Construct `Model` instance from `source` configuration. - - Arguments: - trust: Whether to trust the ModelConfig file enough to `eval(...)` - any lambda function expressions contained. - load_modules: List of modules used in the definition of the model - which, as a consequence, need to be loaded into the global - namespace. Defaults to loading `torch`. - - Raises: - ValueError: If the ModelConfig contains lambda functions but - `trust = False`. - """ - if isinstance(source, str): - source = ModelConfig.load(source) - - assert isinstance( - source, ModelConfig - ), f"Argument `source` of type ({type(source)}) is not a `ModelConfig" - - return source._construct_model(trust, load_modules)
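# Example (usage sketch of the helpers above; file names are illustrative):
model.save("model.pth")                  # entire model, pickled with dill
restored = Model.load("model.pth")       # restored on CPU

model.save_state_dict("state_dict.pth")  # weights only
model.load_state_dict("state_dict.pth")  # onto an identically built model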
\ No newline at end of file
diff --git a/_modules/graphnet/models/standard_model.html b/_modules/graphnet/models/standard_model.html
deleted file mode 100644
index 29381cb6c..000000000
--- a/_modules/graphnet/models/standard_model.html
+++ /dev/null
@@ -1,602 +0,0 @@

Source code for graphnet.models.standard_model

-"""Standard model class(es)."""
-
-from typing import Any, Dict, List, Optional, Union
-
-import torch
-from torch import Tensor
-from torch.nn import ModuleList
-from torch.optim import Adam
-from torch.utils.data import DataLoader
-from torch_geometric.data import Data
-import pandas as pd
-
-from graphnet.models.graphs import GraphDefinition
-from graphnet.models.gnn.gnn import GNN
-from graphnet.models.model import Model
-from graphnet.models.task import Task
-
-
-
-[docs] -class StandardModel(Model): - """Main class for standard models in graphnet. - - This class chains together the different elements of a complete GNN-based - model (detector read-in, GNN architecture, and task-specific read-outs). - """ - - def __init__( - self, - *, - graph_definition: GraphDefinition, - gnn: GNN, - tasks: Union[Task, List[Task]], - optimizer_class: type = Adam, - optimizer_kwargs: Optional[Dict] = None, - scheduler_class: Optional[type] = None, - scheduler_kwargs: Optional[Dict] = None, - scheduler_config: Optional[Dict] = None, - ) -> None: - """Construct `StandardModel`.""" - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - # Check(s) - if isinstance(tasks, Task): - tasks = [tasks] - assert isinstance(tasks, (list, tuple)) - assert all(isinstance(task, Task) for task in tasks) - assert isinstance(graph_definition, GraphDefinition) - assert isinstance(gnn, GNN) - - # Member variable(s) - self._graph_definition = graph_definition - self._gnn = gnn - self._tasks = ModuleList(tasks) - self._optimizer_class = optimizer_class - self._optimizer_kwargs = optimizer_kwargs or dict() - self._scheduler_class = scheduler_class - self._scheduler_kwargs = scheduler_kwargs or dict() - self._scheduler_config = scheduler_config or dict() - - # set dtype of GNN from graph_definition - self._gnn.type(self._graph_definition._dtype) - - @property - def target_labels(self) -> List[str]: - """Return target label.""" - return [label for task in self._tasks for label in task._target_labels] - - @property - def prediction_labels(self) -> List[str]: - """Return prediction labels.""" - return [ - label for task in self._tasks for label in task._prediction_labels - ] - -
-[docs] - def configure_optimizers(self) -> Dict[str, Any]: - """Configure the model's optimizer(s).""" - optimizer = self._optimizer_class( - self.parameters(), **self._optimizer_kwargs - ) - config = { - "optimizer": optimizer, - } - if self._scheduler_class is not None: - scheduler = self._scheduler_class( - optimizer, **self._scheduler_kwargs - ) - config.update( - { - "lr_scheduler": { - "scheduler": scheduler, - **self._scheduler_config, - }, - } - ) - return config
- - -
-[docs] - def forward(self, data: Data) -> List[Union[Tensor, Data]]: - """Forward pass, chaining model components.""" - assert isinstance(data, Data) - x = self._gnn(data) - preds = [task(x) for task in self._tasks] - return preds
- - -
-[docs] - def shared_step(self, batch: Data, batch_idx: int) -> Tensor: - """Perform shared step. - - Applies the forward pass and the following loss calculation, shared - between the training and validation step. - """ - preds = self(batch) - loss = self.compute_loss(preds, batch) - return loss
- - -
-[docs] - def training_step(self, train_batch: Data, batch_idx: int) -> Tensor: - """Perform training step.""" - loss = self.shared_step(train_batch, batch_idx) - self.log( - "train_loss", - loss, - batch_size=self._get_batch_size(train_batch), - prog_bar=True, - on_epoch=True, - on_step=False, - sync_dist=True, - ) - return loss
- - -
-[docs] - def validation_step(self, val_batch: Data, batch_idx: int) -> Tensor: - """Perform validation step.""" - loss = self.shared_step(val_batch, batch_idx) - self.log( - "val_loss", - loss, - batch_size=self._get_batch_size(val_batch), - prog_bar=True, - on_epoch=True, - on_step=False, - sync_dist=True, - ) - return loss
- - -
-[docs] - def compute_loss( - self, preds: Tensor, data: Data, verbose: bool = False - ) -> Tensor: - """Compute and sum losses across tasks.""" - losses = [ - task.compute_loss(pred, data) - for task, pred in zip(self._tasks, preds) - ] - if verbose: - self.info(f"{losses}") - assert all( - loss.dim() == 0 for loss in losses - ), "Please reduce loss for each task separately" - return torch.sum(torch.stack(losses))
- - - def _get_batch_size(self, data: Data) -> int: - return torch.numel(torch.unique(data.batch)) - -
-[docs] - def inference(self) -> None: - """Activate inference mode.""" - for task in self._tasks: - task.inference()
- - -
-[docs] - def train(self, mode: bool = True) -> "Model": - """Deactivate inference mode.""" - super().train(mode) - if mode: - for task in self._tasks: - task.train_eval() - return self
- - -
-[docs] - def predict( - self, - dataloader: DataLoader, - gpus: Optional[Union[List[int], int]] = None, - distribution_strategy: Optional[str] = "auto", - ) -> List[Tensor]: - """Return predictions for `dataloader`.""" - self.inference() - return super().predict( - dataloader=dataloader, - gpus=gpus, - distribution_strategy=distribution_strategy, - )
- - -
-[docs] - def predict_as_dataframe( - self, - dataloader: DataLoader, - prediction_columns: Optional[List[str]] = None, - *, - additional_attributes: Optional[List[str]] = None, - gpus: Optional[Union[List[int], int]] = None, - distribution_strategy: Optional[str] = "auto", - ) -> pd.DataFrame: - """Return predictions for `dataloader` as a DataFrame. - - Include `additional_attributes` as additional columns in the output - DataFrame. - """ - if prediction_columns is None: - prediction_columns = self.prediction_labels - return super().predict_as_dataframe( - dataloader=dataloader, - prediction_columns=prediction_columns, - additional_attributes=additional_attributes, - gpus=gpus, - distribution_strategy=distribution_strategy, - )
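# Example (hypothetical assembly of the pieces above; assumes graphnet is
# installed, and the detector, architecture and loss choices are
# illustrative):
import torch
from graphnet.models.detector.prometheus import Prometheus
from graphnet.models.gnn import DynEdge
from graphnet.models.graphs import KNNGraph
from graphnet.models.task.reconstruction import EnergyReconstruction
from graphnet.training.loss_functions import LogCoshLoss

graph_definition = KNNGraph(detector=Prometheus())
gnn = DynEdge(
    nb_inputs=graph_definition.nb_outputs,
    global_pooling_schemes=["min", "max", "mean", "sum"],
)
task = EnergyReconstruction(
    hidden_size=gnn.nb_outputs,
    loss_function=LogCoshLoss(),
    transform_prediction_and_target=torch.log10,
)
model = StandardModel(graph_definition=graph_definition, gnn=gnn, tasks=task)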
\ No newline at end of file
diff --git a/_modules/graphnet/models/task/classification.html b/_modules/graphnet/models/task/classification.html
deleted file mode 100644
index b3c6c9440..000000000
--- a/_modules/graphnet/models/task/classification.html
+++ /dev/null
@@ -1,411 +0,0 @@

Source code for graphnet.models.task.classification

-"""Classification-specific `Model` class(es)."""
-
-from typing import Any
-
-import torch
-from torch import Tensor
-
-from graphnet.models.task import Task, IdentityTask
-
-
-
-[docs] -class MulticlassClassificationTask(IdentityTask): - """General task for classifying any number of classes. - - Requires the same number of input features as the number of classes being - predicted. Returns the untransformed latent features, which are interpreted - as the logits for each class being classified. - """
- - - -
-[docs] -class BinaryClassificationTask(Task): - """Performs binary classification.""" - - # Requires one feature, logit for being signal class. - nb_inputs = 1 - default_target_labels = ["target"] - default_prediction_labels = ["target_pred"] - - def _forward(self, x: Tensor) -> Tensor: - # transform probability of being muon - return torch.sigmoid(x)
- - - -
-class BinaryClassificationTaskLogits(Task):
-    """Performs binary classification from logits."""
-
-    # Requires one feature, logit for being signal class.
-    nb_inputs = 1
-    default_target_labels = ["target"]
-    default_prediction_labels = ["target_pred"]
-
-    def _forward(self, x: Tensor) -> Tensor:
-        return x
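# Example (hypothetical construction; assumes graphnet is installed, and
# "is_track" is an illustrative truth column). The plain task returns
# sigmoid probabilities, while the `...Logits` variant leaves raw logits
# for a logits-based loss:
from graphnet.training.loss_functions import BinaryCrossEntropyLoss

task = BinaryClassificationTask(
    hidden_size=128,  # size of the preceding latent layer
    target_labels="is_track",
    loss_function=BinaryCrossEntropyLoss(),
)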
\ No newline at end of file
diff --git a/_modules/graphnet/models/task/reconstruction.html b/_modules/graphnet/models/task/reconstruction.html
deleted file mode 100644
index ccb644a97..000000000
--- a/_modules/graphnet/models/task/reconstruction.html
+++ /dev/null
@@ -1,609 +0,0 @@

Source code for graphnet.models.task.reconstruction

-"""Reconstruction-specific `Model` class(es)."""
-
-import numpy as np
-import torch
-from torch import Tensor
-
-from graphnet.models.task import Task
-from graphnet.utilities.maths import eps_like
-
-
-
-[docs] -class AzimuthReconstructionWithKappa(Task): - """Reconstructs azimuthal angle and associated kappa (1/var).""" - - # Requires two features: untransformed points in (x,y)-space. - default_target_labels = ["azimuth"] - default_prediction_labels = ["azimuth_pred", "azimuth_kappa"] - nb_inputs = 2 - - def _forward(self, x: Tensor) -> Tensor: - # Transform outputs to angle and prepare prediction - kappa = torch.linalg.vector_norm(x, dim=1) + eps_like(x) - angle = torch.atan2(x[:, 1], x[:, 0]) - angle = torch.where( - angle < 0, angle + 2 * np.pi, angle - ) # atan(y,x) -> [-pi, pi] - return torch.stack((angle, kappa), dim=1)
- - - -
-[docs] -class AzimuthReconstruction(AzimuthReconstructionWithKappa): - """Reconstructs azimuthal angle.""" - - # Requires two features: untransformed points in (x,y)-space. - default_target_labels = ["azimuth"] - default_prediction_labels = ["azimuth_pred"] - nb_inputs = 2 - - def _forward(self, x: Tensor) -> Tensor: - # Transform outputs to angle and prepare prediction - res = super()._forward(x) - angle = res[:, 0].unsqueeze(1) - kappa = res[:, 1] - sigma = torch.sqrt(1.0 / kappa) - beta = 1e-3 - kl_loss = torch.mean(sigma**2 - torch.log(sigma) - 1) - self._regularisation_loss += beta * kl_loss - return angle
- - - -
-[docs] -class DirectionReconstructionWithKappa(Task): - """Reconstructs direction with kappa from the 3D-vMF distribution.""" - - # Requires three features: untransformed points in (x,y,z)-space. - default_target_labels = [ - "direction" - ] # contains dir_x, dir_y, dir_z see https://github.com/graphnet-team/graphnet/blob/95309556cfd46a4046bc4bd7609888aab649e295/src/graphnet/training/labels.py#L29 - default_prediction_labels = [ - "dir_x_pred", - "dir_y_pred", - "dir_z_pred", - "direction_kappa", - ] - nb_inputs = 3 - - def _forward(self, x: Tensor) -> Tensor: - # Transform outputs to angle and prepare prediction - kappa = torch.linalg.vector_norm(x, dim=1) + eps_like(x) - vec_x = x[:, 0] / kappa - vec_y = x[:, 1] / kappa - vec_z = x[:, 2] / kappa - return torch.stack((vec_x, vec_y, vec_z, kappa), dim=1)
- - - -
-[docs] -class ZenithReconstruction(Task): - """Reconstructs zenith angle.""" - - # Requires two features: zenith angle itself. - default_target_labels = ["zenith"] - default_prediction_labels = ["zenith_pred"] - nb_inputs = 1 - - def _forward(self, x: Tensor) -> Tensor: - # Transform outputs to angle and prepare prediction - return torch.sigmoid(x[:, :1]) * np.pi
- - - -
-class ZenithReconstructionWithKappa(ZenithReconstruction):
-    """Reconstructs zenith angle and associated kappa (1/var)."""
-
-    # Requires one feature in addition to `ZenithReconstruction`:
-    # kappa (uncertainty; 1/variance).
-    default_target_labels = ["zenith"]
-    default_prediction_labels = ["zenith_pred", "zenith_kappa"]
-    nb_inputs = 2
-
-    def _forward(self, x: Tensor) -> Tensor:
-        # Transform outputs to angle and prepare prediction
-        angle = super()._forward(x[:, :1]).squeeze(1)
-        kappa = torch.abs(x[:, 1]) + eps_like(x)
-        return torch.stack((angle, kappa), dim=1)
- - - -
-[docs] -class EnergyReconstruction(Task): - """Reconstructs energy using stable method.""" - - # Requires one feature: untransformed energy - default_target_labels = ["energy"] - default_prediction_labels = ["energy_pred"] - nb_inputs = 1 - - def _forward(self, x: Tensor) -> Tensor: - # Transform to positive energy domain avoiding `-inf` in `log10` - # Transform, thereby preventing overflow and underflow error. - return torch.nn.functional.softplus(x, beta=0.05) + eps_like(x)
- - - -
-[docs] -class EnergyReconstructionWithPower(Task): - """Reconstructs energy.""" - - # Requires one feature: untransformed energy - default_target_labels = ["energy"] - default_prediction_labels = ["energy_pred"] - nb_inputs = 1 - - def _forward(self, x: Tensor) -> Tensor: - # Transform energy - return torch.pow(10, x[:, 0] + 1.0).unsqueeze(1)
- - - -
-[docs] -class EnergyReconstructionWithUncertainty(EnergyReconstruction): - """Reconstructs energy and associated uncertainty (log(var)).""" - - # Requires one feature in addition to `EnergyReconstruction`: log-variance (uncertainty). - default_target_labels = ["energy"] - default_prediction_labels = ["energy_pred", "energy_sigma"] - nb_inputs = 2 - - def _forward(self, x: Tensor) -> Tensor: - # Transform energy - energy = super()._forward(x[:, :1]).squeeze(1) - log_var = x[:, 1] - pred = torch.stack((energy, log_var), dim=1) - return pred
- - - -
-[docs] -class VertexReconstruction(Task): - """Reconstructs vertex position and time.""" - - # Requires four features, x, y, z, and t. - default_target_labels = ["vertex"] - default_prediction_labels = [ - "position_x_pred", - "position_y_pred", - "position_z_pred", - "interaction_time_pred", - ] - nb_inputs = 4 - - def _forward(self, x: Tensor) -> Tensor: - # Scale xyz to roughly the right order of magnitude, leave time - x[:, 0] = x[:, 0] * 1e2 - x[:, 1] = x[:, 1] * 1e2 - x[:, 2] = x[:, 2] * 1e2 - - return x
- - - -
-[docs] -class PositionReconstruction(Task): - """Reconstructs vertex position.""" - - # Requires three features, x, y, and z. - default_target_labels = ["position"] - default_prediction_labels = [ - "position_x_pred", - "position_y_pred", - "position_z_pred", - ] - nb_inputs = 3 - - def _forward(self, x: Tensor) -> Tensor: - # Scale to roughly the right order of magnitude - x[:, 0] = x[:, 0] * 1e2 - x[:, 1] = x[:, 1] * 1e2 - x[:, 2] = x[:, 2] * 1e2 - - return x
- - - -
-[docs] -class TimeReconstruction(Task): - """Reconstructs time.""" - - # Requires one feature, time. - default_target_labels = ["interaction_time"] - default_prediction_labels = ["interaction_time_pred"] - nb_inputs = 1 - - def _forward(self, x: Tensor) -> Tensor: - # Leave as it is - return x
- - - -
-[docs] -class InelasticityReconstruction(Task): - """Reconstructs interaction inelasticity. - - That is, 1-(track energy / hadronic energy). - """ - - # Requires one features: inelasticity itself - default_target_labels = ["inelasticity"] - default_prediction_labels = ["inelasticity_pred"] - nb_inputs = 1 - - def _forward(self, x: Tensor) -> Tensor: - # Transform output to unit range - return torch.sigmoid(x)
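# Example (tiny numeric check, assuming only torch/numpy) of the azimuth
# mapping above: `atan2` returns angles in [-pi, pi], which the task shifts
# into [0, 2*pi):
import numpy as np
import torch

xy = torch.tensor([[1.0, -1.0]])                          # latent (x, y)
angle = torch.atan2(xy[:, 1], xy[:, 0])                   # -> -pi/4
angle = torch.where(angle < 0, angle + 2 * np.pi, angle)  # -> 7*pi/4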
\ No newline at end of file
diff --git a/_modules/graphnet/models/task/task.html b/_modules/graphnet/models/task/task.html
deleted file mode 100644
index f592e588a..000000000
--- a/_modules/graphnet/models/task/task.html
+++ /dev/null
@@ -1,685 +0,0 @@

Source code for graphnet.models.task.task

-"""Base physics task-specific `Model` class(es)."""
-
-from abc import abstractmethod
-from typing import Any, TYPE_CHECKING, List, Tuple, Union
-from typing import Callable, Optional
-import numpy as np
-
-import torch
-from torch import Tensor
-from torch.nn import Linear
-from torch_geometric.data import Data
-
-if TYPE_CHECKING:
-    # Avoid cyclic dependency
-    from graphnet.training.loss_functions import LossFunction  # type: ignore[attr-defined]
-
-from graphnet.models import Model
-from graphnet.utilities.decorators import final
-
-
-
-class Task(Model):
-    """Base class for all reconstruction and classification tasks."""
-
-    @property
-    @abstractmethod
-    def nb_inputs(self) -> int:
-        """Return number of inputs assumed by task."""
-
-    @property
-    @abstractmethod
-    def default_target_labels(self) -> List[str]:
-        """Return default target labels."""
-        return self._default_target_labels
-
-    @property
-    @abstractmethod
-    def default_prediction_labels(self) -> List[str]:
-        """Return default prediction labels."""
-        return self._default_prediction_labels
-
-    def __init__(
-        self,
-        *,
-        hidden_size: int,
-        loss_function: "LossFunction",
-        target_labels: Optional[Union[str, List[str]]] = None,
-        prediction_labels: Optional[Union[str, List[str]]] = None,
-        transform_prediction_and_target: Optional[Callable] = None,
-        transform_target: Optional[Callable] = None,
-        transform_inference: Optional[Callable] = None,
-        transform_support: Optional[Tuple] = None,
-        loss_weight: Optional[str] = None,
-    ):
-        """Construct `Task`.
-
-        Args:
-            hidden_size: The number of nodes in the layer feeding into this
-                task, used to construct the affine transformation to the
-                predicted quantity.
-            loss_function: Loss function appropriate to the task.
-            target_labels: Name(s) of the quantity/-ies being predicted, used
-                to extract the target tensor(s) from the `Data` object in
-                `.compute_loss(...)`.
-            prediction_labels: The name(s) of each column that is predicted by
-                the model during inference. If not given, the name will
-                automatically be set to `target_label + _pred`.
-            transform_prediction_and_target: Optional function to transform
-                both the predicted and target tensor before passing them to
-                the loss function. Useful e.g. for having the model predict
-                quantities on a physical scale, but transforming this scale to
-                O(1) for a numerically stable loss computation.
-            transform_target: Optional function to transform only the target
-                tensor before passing it, and the predicted tensor, to the
-                loss function. Useful e.g. for having the model predict a
-                transformed version of the target quantity, e.g. the log10-
-                scaled energy, rather than the physical quantity itself. Used
-                in conjunction with `transform_inference` to perform the
-                inverse transform on the predicted quantity to recover the
-                physical scale.
-            transform_inference: Optional function to inverse-transform the
-                model prediction to recover a physical scale. Used in
-                conjunction with `transform_target`.
-            transform_support: Optional tuple to specify minimum and maximum
-                of the range of validity for the inverse transforms
-                `transform_target` and `transform_inference` in case this is
-                restricted. By default the invertibility of `transform_target`
-                is tested on the range [-1e6, 1e6].
-            loss_weight: Name of the attribute in `data` containing per-event
-                loss weights.
- """ - # Base class constructor - super().__init__() - # Check(s) - if target_labels is None: - target_labels = self.default_target_labels - if isinstance(target_labels, str): - target_labels = [target_labels] - - if prediction_labels is None: - prediction_labels = self.default_prediction_labels - if isinstance(prediction_labels, str): - prediction_labels = [prediction_labels] - - assert isinstance(target_labels, List) # mypy - assert isinstance(prediction_labels, List) # mypy - # Member variables - self._regularisation_loss: Optional[float] = None - self._target_labels = target_labels - self._prediction_labels = prediction_labels - self._loss_function = loss_function - self._inference = False - self._loss_weight = loss_weight - - self._transform_prediction_training: Callable[ - [Tensor], Tensor - ] = lambda x: x - self._transform_prediction_inference: Callable[ - [Tensor], Tensor - ] = lambda x: x - self._transform_target: Callable[[Tensor], Tensor] = lambda x: x - self._validate_and_set_transforms( - transform_prediction_and_target, - transform_target, - transform_inference, - transform_support, - ) - - # Mapping from last hidden layer to required size of input - self._affine = Linear(hidden_size, self.nb_inputs) - -
-[docs] - @final - def forward(self, x: Union[Tensor, Data]) -> Union[Tensor, Data]: - """Forward pass.""" - self._regularisation_loss = 0 # Reset - x = self._affine(x) - x = self._forward(x) - return self._transform_prediction(x)
-
-
-    @final
-    def _transform_prediction(
-        self, prediction: Union[Tensor, Data]
-    ) -> Union[Tensor, Data]:
-        if self._inference:
-            return self._transform_prediction_inference(prediction)
-        else:
-            return self._transform_prediction_training(prediction)
-
-    @abstractmethod
-    def _forward(self, x: Union[Tensor, Data]) -> Union[Tensor, Data]:
-        """Syntax like `.forward`, for implementation in inheriting classes."""
-
-[docs]
-    @final
-    def compute_loss(self, pred: Union[Tensor, Data], data: Data) -> Tensor:
-        """Compute loss of `pred` w.r.t. target labels in `data`."""
-        target = torch.stack(
-            [data[label] for label in self._target_labels], dim=1
-        )
-        target = self._transform_target(target)
-        if self._loss_weight is not None:
-            weights = data[self._loss_weight]
-        else:
-            weights = None
-        loss = (
-            self._loss_function(pred, target, weights=weights)
-            + self._regularisation_loss
-        )
-        return loss
- - -
-[docs] - @final - def inference(self) -> None: - """Activate inference mode.""" - self._inference = True
- - -
-[docs] - @final - def train_eval(self) -> None: - """Deactivate inference mode.""" - self._inference = False
-
-
-    @final
-    def _validate_and_set_transforms(
-        self,
-        transform_prediction_and_target: Union[Callable, None],
-        transform_target: Union[Callable, None],
-        transform_inference: Union[Callable, None],
-        transform_support: Union[Tuple, None],
-    ) -> None:
-        """Validate and set transforms.
-
-        Assert that a valid combination of transformation arguments is passed
-        and update the corresponding functions.
-        """
-        # Checks
-        assert not (
-            (transform_prediction_and_target is not None)
-            and (transform_target is not None)
-        ), "Please specify at most one of `transform_prediction_and_target` and `transform_target`"
-        if (transform_target is not None) != (transform_inference is not None):
-            self.warning(
-                "Setting one of `transform_target` and `transform_inference`, but not "
-                "the other."
-            )
-
-        if transform_target is not None:
-            assert transform_target is not None
-            assert transform_inference is not None
-
-        if transform_support is not None:
-            assert transform_support is not None
-
-            assert (
-                len(transform_support) == 2
-            ), "Please specify min and max for transformation support."
-            x_test = torch.from_numpy(
-                np.linspace(transform_support[0], transform_support[1], 10)
-            )
-        else:
-            x_test = np.logspace(-6, 6, 12 + 1)
-            x_test = torch.from_numpy(
-                np.concatenate([-x_test[::-1], [0], x_test])
-            )
-
-        # Add feature dimension before inference transformation to make it
-        # match the dimensions of a standard prediction. Remove it again
-        # before comparison. Temporary
-        try:
-            t_test = torch.unsqueeze(transform_target(x_test), -1)
-            t_test = torch.squeeze(transform_inference(t_test), -1)
-            valid = torch.isfinite(t_test)
-
-            assert torch.allclose(t_test[valid], x_test[valid]), (
-                "The provided transforms for targets during training and "
-                "predictions during inference are not inverse. Please "
-                "adjust transformation functions or support."
-            )
-            del x_test, t_test, valid
-
-        except IndexError:
-            self.warning(
-                "transform_target and/or transform_inference rely on "
-                "indexing, which we won't validate. Please make sure that "
-                "they are mutually inverse, i.e. that\n"
-                "    x = transform_inference(transform_target(x))\n"
-                "for all x that are within your target range."
-            )
-
-        # Set transforms
-        if transform_prediction_and_target is not None:
-            self._transform_prediction_training = (
-                transform_prediction_and_target
-            )
-            self._transform_target = transform_prediction_and_target
-        else:
-            if transform_target is not None:
-                self._transform_target = transform_target
-            if transform_inference is not None:
-                self._transform_prediction_inference = transform_inference
- - - -
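A hedged illustration of how the transform arguments above interact (a sketch only: it assumes the `EnergyReconstruction` task and `LogCoshLoss` from elsewhere in this package; the hidden size and support range are made up):

import torch
from graphnet.models.task.reconstruction import EnergyReconstruction
from graphnet.training.loss_functions import LogCoshLoss

task = EnergyReconstruction(
    hidden_size=128,  # size of the last hidden layer (illustrative)
    loss_function=LogCoshLoss(),
    target_labels="energy",
    # Train on log10(E) so the loss sees O(1) values ...
    transform_target=lambda energy: torch.log10(energy),
    # ... and invert back to the physical scale at inference time.
    transform_inference=lambda log10_energy: torch.pow(10.0, log10_energy),
    # Restrict the invertibility check to positive energies, where log10 is defined.
    transform_support=(1e-3, 1e7),
)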
-[docs] -class IdentityTask(Task): - """Identity, or trivial, task.""" - - def __init__( - self, - nb_outputs: int, - target_labels: Union[List[str], Any], - *args: Any, - **kwargs: Any, - ): - """Construct IdentityTask. - - Return the `nb_outputs` as a direct, affine transformation of the last - hidden layer. - """ - self._nb_inputs = nb_outputs - self._default_target_labels = ( - target_labels - if isinstance(target_labels, list) - else [target_labels] - ) - self._default_prediction_labels = [ - f"target_{i}_pred" for i in range(len(self._default_target_labels)) - ] - - super().__init__(*args, **kwargs) - # Base class constructor - - @property - def default_target_labels(self) -> List[str]: - """Return default target labels.""" - return self._default_target_labels - - @property - def default_prediction_labels(self) -> List[str]: - """Return default prediction labels.""" - return self._default_prediction_labels - - @property - def nb_inputs(self) -> int: - """Return number of inputs assumed by task.""" - return self._nb_inputs - - def _forward(self, x: Tensor) -> Tensor: - # Leave it as is. - return x
- -
\ No newline at end of file
diff --git a/_modules/graphnet/models/utils.html b/_modules/graphnet/models/utils.html
deleted file mode 100644
index 35c1b3460..000000000
--- a/_modules/graphnet/models/utils.html
+++ /dev/null
@@ -1,430 +0,0 @@
Source code for graphnet.models.utils

-"""Utility functions for `graphnet.models`."""
-
-from typing import List, Tuple, Union
-from torch_geometric.nn import knn_graph
-from torch_geometric.data import Batch
-import torch
-from torch import Tensor, LongTensor
-
-from torch_geometric.utils.homophily import homophily
-
-
-
-[docs]
-def calculate_xyzt_homophily(
-    x: Tensor, edge_index: LongTensor, batch: Batch
-) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
-    """Calculate xyzt-homophily from a batch of graphs.
-
-    Homophily is a graph-level scalar that measures how alike the values of a
-    given node feature are across connected nodes. Note that this calculation
-    assumes that the first four columns of `x` are the x-, y-, z- and
-    t-coordinates, in that order.
-
-    Returns:
-        Tuple, each element with shape [batch_size, 1].
-    """
-    hx = homophily(edge_index, x[:, 0], batch).reshape(-1, 1)
-    hy = homophily(edge_index, x[:, 1], batch).reshape(-1, 1)
-    hz = homophily(edge_index, x[:, 2], batch).reshape(-1, 1)
-    ht = homophily(edge_index, x[:, 3], batch).reshape(-1, 1)
-    return hx, hy, hz, ht
- - - -
-[docs] -def calculate_distance_matrix(xyz_coords: Tensor) -> Tensor: - """Calculate the matrix of pairwise distances between pulses. - - Args: - xyz_coords: (x,y,z)-coordinates of pulses, of shape [nb_doms, 3]. - - Returns: - Matrix of pairwise distances, of shape [nb_doms, nb_doms] - """ - diff = xyz_coords.unsqueeze(dim=2) - xyz_coords.T.unsqueeze(dim=0) - return torch.sqrt(torch.sum(diff**2, dim=1))
- - - -
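As a sanity check, the computation above agrees with `torch.cdist`; a minimal sketch with made-up coordinates:

import torch
from graphnet.models.utils import calculate_distance_matrix

xyz = torch.rand(5, 3)  # five pulses with (x, y, z) coordinates
distances = calculate_distance_matrix(xyz)
assert distances.shape == (5, 5)
# Pairwise distances are symmetric with a zero diagonal.
assert torch.allclose(distances, torch.cdist(xyz, xyz), atol=1e-6)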
-[docs] -def knn_graph_batch(batch: Batch, k: List[int], columns: List[int]) -> Batch: - """Calculate k-nearest-neighbours with individual k for each batch event. - - Args: - batch: Batch of events. - k: A list of k's. - columns: The columns of Data.x used for computing the distances. E.g., - Data.x[:,[0,1,2]] - - Returns: - Returns the same batch of events, but with updated edges. - """ - data_list = batch.to_data_list() - for i in range(len(data_list)): - data_list[i].edge_index = knn_graph( - x=data_list[i].x[:, columns], k=k[i] - ) - return Batch.from_data_list(data_list)
- -
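A usage sketch for `knn_graph_batch` (toy data; two events with per-event k of 4 and 8, using only the spatial columns for the neighbour search):

import torch
from torch_geometric.data import Batch, Data
from graphnet.models.utils import knn_graph_batch

# Two toy events with 4D node features, e.g. (x, y, z, t).
events = [Data(x=torch.rand(10, 4)), Data(x=torch.rand(20, 4))]
batch = Batch.from_data_list(events)

# Recompute edges with k=4 for the first event and k=8 for the second.
batch = knn_graph_batch(batch, k=[4, 8], columns=[0, 1, 2])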
\ No newline at end of file
diff --git a/_modules/graphnet/training/labels.html b/_modules/graphnet/training/labels.html
deleted file mode 100644
index b2dd18a77..000000000
--- a/_modules/graphnet/training/labels.html
+++ /dev/null
@@ -1,436 +0,0 @@

Source code for graphnet.training.labels

-"""Class(es) for constructing training labels at runtime."""
-
-from abc import ABC, abstractmethod
-import torch
-from torch_geometric.data import Data
-from graphnet.utilities.logging import Logger
-
-
-
-[docs]
-class Label(ABC, Logger):
-    """Base `Label` class for producing labels from a single `Data` instance."""
-
-    def __init__(self, key: str):
-        """Construct `Label`.
-
-        Args:
-            key: The name of the field in `Data` where the label will be
-                stored. That is, `graph[key] = label`.
-        """
-        self._key = key
-
-        # Base class constructor
-        super().__init__(name=__name__, class_name=self.__class__.__name__)
-
-    @property
-    def key(self) -> str:
-        """Return value of `key`."""
-        return self._key
-
-    @abstractmethod
-    def __call__(self, graph: Data) -> torch.tensor:
-        """Label-specific implementation."""
- - - -
-[docs]
-class Direction(Label):
-    """Class for producing particle direction/pointing label."""
-
-    def __init__(
-        self,
-        key: str = "direction",
-        azimuth_key: str = "azimuth",
-        zenith_key: str = "zenith",
-    ):
-        """Construct `Direction`.
-
-        Args:
-            key: The name of the field in `Data` where the label will be
-                stored. That is, `graph[key] = label`.
-            azimuth_key: The name of the pre-existing key in `graph` that will
-                be used to access the azimuth angle, used when calculating
-                the direction.
-            zenith_key: The name of the pre-existing key in `graph` that will
-                be used to access the zenith angle, used when calculating the
-                direction.
-        """
-        self._azimuth_key = azimuth_key
-        self._zenith_key = zenith_key
-
-        # Base class constructor
-        super().__init__(key=key)
-
-    def __call__(self, graph: Data) -> torch.tensor:
-        """Compute label for `graph`."""
-        x = torch.cos(graph[self._azimuth_key]) * torch.sin(
-            graph[self._zenith_key]
-        ).reshape(-1, 1)
-        y = torch.sin(graph[self._azimuth_key]) * torch.sin(
-            graph[self._zenith_key]
-        ).reshape(-1, 1)
-        z = torch.cos(graph[self._zenith_key]).reshape(-1, 1)
-        return torch.cat((x, y, z), dim=1)
- -
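A sketch of how such a label is typically attached to a dataset (assuming `dataset` is an already-constructed graphnet `Dataset` whose truth table provides per-event `azimuth` and `zenith`):

from graphnet.training.labels import Direction

label = Direction(key="direction", azimuth_key="azimuth", zenith_key="zenith")
dataset.add_label(key=label.key, fn=label)
# Each graph served by the dataset now carries graph["direction"],
# a unit 3-vector per event, suitable as a direction-reconstruction target.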
\ No newline at end of file
diff --git a/_modules/graphnet/training/loss_functions.html b/_modules/graphnet/training/loss_functions.html
deleted file mode 100644
index d1b35f50a..000000000
--- a/_modules/graphnet/training/loss_functions.html
+++ /dev/null
@@ -1,856 +0,0 @@

Source code for graphnet.training.loss_functions

-"""Collection of loss functions.
-
-All loss functions inherit from `LossFunction` which ensures a common syntax,
-handles per-event weights, etc.
-"""
-
-from abc import abstractmethod
-from typing import Any, Optional, Union, List, Dict
-
-import numpy as np
-import scipy.special
-import torch
-from torch import Tensor
-from torch import nn
-from torch.nn.functional import (
-    one_hot,
-    cross_entropy,
-    binary_cross_entropy,
-    softplus,
-)
-
-from graphnet.models.model import Model
-from graphnet.utilities.decorators import final
-
-
-
-[docs] -class LossFunction(Model): - """Base class for loss functions in `graphnet`.""" - - def __init__(self, **kwargs: Any) -> None: - """Construct `LossFunction`, saving model config.""" - super().__init__(**kwargs) - -
-[docs]
-    @final
-    def forward(  # type: ignore[override]
-        self,
-        prediction: Tensor,
-        target: Tensor,
-        weights: Optional[Tensor] = None,
-        return_elements: bool = False,
-    ) -> Tensor:
-        """Forward pass for all loss functions.
-
-        Args:
-            prediction: Tensor containing predictions. Shape [N,P]
-            target: Tensor containing targets. Shape [N,T]
-            weights: Optional per-event weights, multiplied onto the
-                elementwise loss terms before any averaging. Shape [N,]
-            return_elements: Whether elementwise loss terms should be returned.
-                The alternative is to return the averaged loss across examples.
-
-        Returns:
-            Loss, either averaged to a scalar (if `return_elements = False`) or
-            elementwise terms with shape [N,] (if `return_elements = True`).
-        """
-        elements = self._forward(prediction, target)
-        if weights is not None:
-            elements = elements * weights
-        assert elements.size(dim=0) == target.size(
-            dim=0
-        ), "`_forward` should return elementwise loss terms."
-
-        return elements if return_elements else torch.mean(elements)
-
-
-    @abstractmethod
-    def _forward(self, prediction: Tensor, target: Tensor) -> Tensor:
-        """Syntax like `.forward`, for implementation in inheriting classes."""
- - - -
-[docs] -class MSELoss(LossFunction): - """Mean squared error loss.""" - - def _forward(self, prediction: Tensor, target: Tensor) -> Tensor: - """Implement loss calculation.""" - # Check(s) - assert prediction.dim() == 2 - assert prediction.size() == target.size() - - elements = torch.mean((prediction - target) ** 2, dim=-1) - return elements
- - - -
-[docs] -class RMSELoss(MSELoss): - """Root mean squared error loss.""" - - def _forward(self, prediction: Tensor, target: Tensor) -> Tensor: - """Implement loss calculation.""" - # Check(s) - elements = super()._forward(prediction, target) - elements = torch.sqrt(elements) - return elements
- - - -
-[docs]
-class LogCoshLoss(LossFunction):
-    """Log-cosh loss function.
-
-    Acts like x^2 for small x; and like |x| for large x.
-    """
-
-    @classmethod
-    def _log_cosh(cls, x: Tensor) -> Tensor:  # pylint: disable=invalid-name
-        """Numerically stable version of log(cosh(x)).
-
-        Used to avoid `inf` for even moderately large differences.
-        See [https://github.com/keras-team/keras/blob/v2.6.0/keras/losses.py#L1580-L1617]
-        """
-        return x + softplus(-2.0 * x) - np.log(2.0)
-
-    def _forward(self, prediction: Tensor, target: Tensor) -> Tensor:
-        """Implement loss calculation."""
-        diff = prediction - target
-        elements = self._log_cosh(diff)
-        return elements
- - - -
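The identity used in `_log_cosh`, log(cosh(x)) = x + softplus(-2x) - log(2), avoids the overflow of cosh(x) for large |x|; an illustrative check:

import torch
from graphnet.training.loss_functions import LogCoshLoss

x = torch.tensor([0.5, 10.0, 100.0], dtype=torch.float64)
naive = torch.log(torch.cosh(x))  # works here, but overflows for x >~ 710
stable = LogCoshLoss._log_cosh(x)
assert torch.allclose(naive, stable)

big = torch.tensor([1000.0], dtype=torch.float64)
assert torch.isinf(torch.log(torch.cosh(big))).all()     # naive form overflows
assert torch.isfinite(LogCoshLoss._log_cosh(big)).all()  # stable form does not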
-[docs]
-class CrossEntropyLoss(LossFunction):
-    """Compute cross-entropy loss for classification tasks.
-
-    Predictions are an [N, num_class]-matrix of logits (i.e., non-softmax'ed
-    probabilities), and targets are an [N,1]-matrix with integer values in
-    {0, ..., num_classes - 1}.
-    """
-
-    def __init__(
-        self,
-        options: Union[int, List[Any], Dict[Any, int]],
-        *args: Any,
-        **kwargs: Any,
-    ):
-        """Construct CrossEntropyLoss."""
-        # Base class constructor
-        super().__init__(*args, **kwargs)
-
-        # Member variables
-        self._options = options
-        self._nb_classes: int
-        if isinstance(self._options, int):
-            # Integer number of classes; the dtype of the targets themselves
-            # is checked in `_forward`.
-            assert (
-                self._options >= 2
-            ), f"Minimum of two classes required. Got {self._options}."
-            self._nb_classes = options  # type: ignore
-        elif isinstance(self._options, list):
-            self._nb_classes = len(self._options)  # type: ignore
-        elif isinstance(self._options, dict):
-            self._nb_classes = len(
-                np.unique(list(self._options.values()))
-            )  # type: ignore
-        else:
-            raise ValueError(
-                f"Class options of type {type(self._options)} not supported"
-            )
-
-        self._loss = nn.CrossEntropyLoss(reduction="none")
-
-    def _forward(self, prediction: Tensor, target: Tensor) -> Tensor:
-        """Map target values onto class indices and evaluate cross-entropy."""
-        if isinstance(self._options, int):
-            # Integer number of classes: Targets are expected to be in
-            # {0, ..., nb_classes - 1}.
-
-            # Target integers are positive
-            assert torch.all(target >= 0)
-
-            # Target integers are consistent with the expected number of classes.
-            assert torch.all(target < self._options)
-
-            assert target.dtype in [torch.int32, torch.int64]
-            target_integer = target
-
-        elif isinstance(self._options, list):
-            # List of classes: Mapping target classes in list onto
-            # (0, nb_classes - 1). Example:
-            #     Given options: [1, 12, 13, ...]
-            #     Yields: [1, 13, 12] -> [0, 2, 1, ...]
-            target_integer = torch.tensor(
-                [self._options.index(value) for value in target]
-            )
-
-        elif isinstance(self._options, dict):
-            # Dictionary of classes: Mapping target classes in dict onto
-            # (0, nb_classes - 1). Example:
-            #     Given options: {1: 0, -1: 0, 12: 1, -12: 1, ...}
-            #     Yields: [1, -1, -12, ...] -> [0, 0, 1, ...]
-            target_integer = torch.tensor(
-                [self._options[int(value)] for value in target]
-            )
-
-        else:
-            assert False, "Shouldn't reach here."
-
-        target_one_hot: Tensor = one_hot(target_integer, self._nb_classes).to(
-            prediction.device
-        )
-
-        return self._loss(prediction.float(), target_one_hot.float())
- - - -
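A sketch of the three supported forms of `options`, using made-up PDG-style particle codes; the dict form is convenient when several raw target values collapse onto one class:

import torch
from graphnet.training.loss_functions import CrossEntropyLoss

# 1) Integer: targets are already class indices in {0, ..., 2}.
loss_int = CrossEntropyLoss(options=3)

# 2) List: raw target values map to their position, e.g. 13 -> class 2.
loss_list = CrossEntropyLoss(options=[1, 12, 13])

# 3) Dict: e.g. neutrinos and anti-neutrinos share a class.
loss_dict = CrossEntropyLoss(options={12: 0, -12: 0, 14: 1, -14: 1})

logits = torch.randn(4, 2)                         # [N, num_classes]
target = torch.tensor([[12], [-12], [14], [-14]])  # raw values, shape [N, 1]
value = loss_dict(logits, target)                  # scalar (mean over events)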
-[docs] -class BinaryCrossEntropyLoss(LossFunction): - """Compute binary cross entropy loss. - - Predictions are vector probabilities (i.e., values between 0 and 1), and - targets should be 0 and 1. - """ - - def _forward(self, prediction: Tensor, target: Tensor) -> Tensor: - return binary_cross_entropy( - prediction.float(), target.float(), reduction="none" - )
- - - -
-[docs]
-class LogCMK(torch.autograd.Function):
-    """MIT License.
-
-    Copyright (c) 2019 Max Ryabinin
-
-    Permission is hereby granted, free of charge, to any person obtaining a copy
-    of this software and associated documentation files (the "Software"), to deal
-    in the Software without restriction, including without limitation the rights
-    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-    copies of the Software, and to permit persons to whom the Software is
-    furnished to do so, subject to the following conditions:
-
-    The above copyright notice and this permission notice shall be included in all
-    copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-    SOFTWARE.
-    _____________________
-
-    From [https://github.com/mryab/vmf_loss/blob/master/losses.py]
-    Modified to use modified Bessel function instead of exponentially scaled ditto
-    (i.e. `.ive` -> `.iv`) as indicated in [1812.04616] in spite of suggestion in
-    Sec. 8.2 of this paper. The change has been validated through comparison with
-    exact calculations for `m=2` and `m=3` and found to yield the correct results.
-    """
-
-[docs] - @staticmethod - def forward( - ctx: Any, m: int, kappa: Tensor - ) -> Tensor: # pylint: disable=invalid-name,arguments-differ - """Forward pass.""" - dtype = kappa.dtype - ctx.save_for_backward(kappa) - ctx.m = m - ctx.dtype = dtype - kappa = kappa.double() - iv = torch.from_numpy( - scipy.special.iv(m / 2.0 - 1, kappa.cpu().numpy()) - ).to(kappa.device) - return ( - (m / 2.0 - 1) * torch.log(kappa) - - torch.log(iv) - - (m / 2) * np.log(2 * np.pi) - ).type(dtype)
- - -
-[docs] - @staticmethod - def backward( - ctx: Any, grad_output: Tensor - ) -> Tensor: # pylint: disable=invalid-name,arguments-differ - """Backward pass.""" - kappa = ctx.saved_tensors[0] - m = ctx.m - dtype = ctx.dtype - kappa = kappa.double().cpu().numpy() - grads = -( - (scipy.special.iv(m / 2.0, kappa)) - / (scipy.special.iv(m / 2.0 - 1, kappa)) - ) - return ( - None, - grad_output - * torch.from_numpy(grads).to(grad_output.device).type(dtype), - )
-
- - - -
-[docs] -class VonMisesFisherLoss(LossFunction): - """General class for calculating von Mises-Fisher loss. - - Requires implementation for specific dimension `m` in which the target and - prediction vectors need to be prepared. - """ - -
-[docs] - @classmethod - def log_cmk_exact( - cls, m: int, kappa: Tensor - ) -> Tensor: # pylint: disable=invalid-name - """Calculate $log C_{m}(k)$ term in von Mises-Fisher loss exactly.""" - return LogCMK.apply(m, kappa)
- - -
-[docs] - @classmethod - def log_cmk_approx( - cls, m: int, kappa: Tensor - ) -> Tensor: # pylint: disable=invalid-name - """Calculate $log C_{m}(k)$ term in von Mises-Fisher loss approx. - - [https://arxiv.org/abs/1812.04616] Sec. 8.2 with additional minus sign. - """ - v = m / 2.0 - 0.5 - a = torch.sqrt((v + 1) ** 2 + kappa**2) - b = v - 1 - return -a + b * torch.log(b + a)
- - -
-[docs]
-    @classmethod
-    def log_cmk(
-        cls, m: int, kappa: Tensor, kappa_switch: float = 100.0
-    ) -> Tensor:  # pylint: disable=invalid-name
-        """Calculate $log C_{m}(k)$ term in von Mises-Fisher loss.
-
-        Since `log_cmk_exact` diverges for `kappa` >~ 700 (using float64
-        precision), and since `log_cmk_approx` is inaccurate for small `kappa`,
-        this method automatically switches between the two at `kappa_switch`,
-        ensuring continuity at this point.
-        """
-        kappa_switch = torch.tensor([kappa_switch]).to(kappa.device)
-        mask_exact = kappa < kappa_switch
-
-        # Ensure continuity at `kappa_switch`
-        offset = cls.log_cmk_approx(m, kappa_switch) - cls.log_cmk_exact(
-            m, kappa_switch
-        )
-        ret = cls.log_cmk_approx(m, kappa) - offset
-        ret[mask_exact] = cls.log_cmk_exact(m, kappa[mask_exact])
-        return ret
-
-
-    def _evaluate(self, prediction: Tensor, target: Tensor) -> Tensor:
-        """Calculate von Mises-Fisher loss for a vector in D dimensions.
-
-        This loss utilises the von Mises-Fisher distribution, which is a
-        probability distribution on the (D - 1)-sphere in D-dimensional space.
-
-        Args:
-            prediction: Predicted vector, of shape [batch_size, D].
-            target: Target unit vector, of shape [batch_size, D].
-
-        Returns:
-            Elementwise von Mises-Fisher loss terms.
-        """
-        # Check(s)
-        assert prediction.dim() == 2
-        assert target.dim() == 2
-        assert prediction.size() == target.size()
-
-        # Computing loss
-        m = target.size()[1]
-        k = torch.norm(prediction, dim=1)
-        dotprod = torch.sum(prediction * target, dim=1)
-        elements = -self.log_cmk(m, k) - dotprod
-        return elements
-
-    @abstractmethod
-    def _forward(self, prediction: Tensor, target: Tensor) -> Tensor:
-        raise NotImplementedError
- - - -
-[docs]
-class VonMisesFisher2DLoss(VonMisesFisherLoss):
-    """von Mises-Fisher loss function for vectors in the 2D plane."""
-
-    def _forward(self, prediction: Tensor, target: Tensor) -> Tensor:
-        """Calculate von Mises-Fisher loss for an angle in the 2D plane.
-
-        Args:
-            prediction: Output of the model. Must have shape [N, 2] where 0th
-                column is a prediction of `angle` and 1st column is an estimate
-                of `kappa`.
-            target: Target tensor, extracted from graph object.
-
-        Returns:
-            loss: Elementwise von Mises-Fisher loss terms. Shape [N,]
-        """
-        # Check(s)
-        assert prediction.dim() == 2 and prediction.size()[1] == 2
-        assert target.dim() == 2
-        assert prediction.size()[0] == target.size()[0]
-
-        # Formatting target
-        angle_true = target[:, 0]
-        t = torch.stack(
-            [
-                torch.cos(angle_true),
-                torch.sin(angle_true),
-            ],
-            dim=1,
-        )
-
-        # Formatting prediction
-        angle_pred = prediction[:, 0]
-        kappa = prediction[:, 1]
-        p = kappa.unsqueeze(1) * torch.stack(
-            [
-                torch.cos(angle_pred),
-                torch.sin(angle_pred),
-            ],
-            dim=1,
-        )
-
-        return self._evaluate(p, t)
- - - -
-[docs]
-class EuclideanDistanceLoss(LossFunction):
-    """Euclidean distance loss in three dimensions."""
-
-    def _forward(self, prediction: Tensor, target: Tensor) -> Tensor:
-        """Calculate 3D Euclidean distance between predicted and target.
-
-        Args:
-            prediction: Output of the model. Must have shape [N, 3]
-            target: Target tensor, extracted from graph object.
-
-        Returns:
-            Elementwise Euclidean distance terms. Shape [N,]
-        """
-        return torch.sqrt(
-            (prediction[:, 0] - target[:, 0]) ** 2
-            + (prediction[:, 1] - target[:, 1]) ** 2
-            + (prediction[:, 2] - target[:, 2]) ** 2
-        )
- - - -
-[docs]
-class VonMisesFisher3DLoss(VonMisesFisherLoss):
-    """von Mises-Fisher loss function for vectors in 3D space."""
-
-    def _forward(self, prediction: Tensor, target: Tensor) -> Tensor:
-        """Calculate von Mises-Fisher loss for a direction in 3D.
-
-        Args:
-            prediction: Output of the model. Must have shape [N, 4] where
-                columns 0, 1, 2 are predictions of `direction` and last column
-                is an estimate of `kappa`.
-            target: Target tensor, extracted from graph object.
-
-        Returns:
-            Elementwise von Mises-Fisher loss terms. Shape [N,]
-        """
-        target = target.reshape(-1, 3)
-        # Check(s)
-        assert prediction.dim() == 2 and prediction.size()[1] == 4
-        assert target.dim() == 2
-        assert prediction.size()[0] == target.size()[0]
-
-        kappa = prediction[:, 3]
-        p = kappa.unsqueeze(1) * prediction[:, [0, 1, 2]]
-        return self._evaluate(p, target)
- -
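A shape-level sketch of the 3D case (toy tensors only): the model emits four numbers per event, a direction estimate plus a concentration `kappa`, while the target is a unit vector such as the one produced by the `Direction` label:

import torch
from graphnet.training.loss_functions import VonMisesFisher3DLoss

prediction = torch.randn(8, 4)  # columns 0-2: direction estimate, column 3: kappa
prediction[:, 3] = prediction[:, 3].abs() + 0.1  # kappa must be positive

target = torch.randn(8, 3)
target = target / target.norm(dim=1, keepdim=True)  # unit target vectors

loss = VonMisesFisher3DLoss()(prediction, target)  # scalar (mean over events)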
\ No newline at end of file
diff --git a/_modules/graphnet/training/utils.html b/_modules/graphnet/training/utils.html
deleted file mode 100644
index 787b594c7..000000000
--- a/_modules/graphnet/training/utils.html
+++ /dev/null
@@ -1,656 +0,0 @@

Source code for graphnet.training.utils

-"""Utility functions for `graphnet.training`."""
-
-from collections import OrderedDict
-import os
-from typing import Dict, List, Optional, Tuple, Union, Callable
-
-import numpy as np
-import pandas as pd
-from pytorch_lightning import Trainer
-from sklearn.model_selection import train_test_split
-from torch.utils.data import DataLoader
-from torch_geometric.data import Batch, Data
-
-from graphnet.data.dataset import Dataset
-from graphnet.data.dataset import SQLiteDataset
-from graphnet.data.dataset import ParquetDataset
-from graphnet.models import Model
-from graphnet.utilities.logging import Logger
-from graphnet.models.graphs import GraphDefinition
-
-
-
-[docs]
-def collate_fn(graphs: List[Data]) -> Batch:
-    """Remove graphs with fewer than two DOM hits.
-
-    Should not occur in "production".
-    """
-    graphs = [g for g in graphs if g.n_pulses > 1]
-    return Batch.from_data_list(graphs)
- - - -# @TODO: Remove in favour of DataLoader{,.from_dataset_config} -
-[docs] -def make_dataloader( - db: str, - pulsemaps: Union[str, List[str]], - graph_definition: GraphDefinition, - features: List[str], - truth: List[str], - *, - batch_size: int, - shuffle: bool, - selection: Optional[List[int]] = None, - num_workers: int = 10, - persistent_workers: bool = True, - node_truth: List[str] = None, - truth_table: str = "truth", - node_truth_table: Optional[str] = None, - string_selection: List[int] = None, - loss_weight_table: Optional[str] = None, - loss_weight_column: Optional[str] = None, - index_column: str = "event_no", - labels: Optional[Dict[str, Callable]] = None, -) -> DataLoader: - """Construct `DataLoader` instance.""" - # Check(s) - if isinstance(pulsemaps, str): - pulsemaps = [pulsemaps] - - dataset = SQLiteDataset( - path=db, - pulsemaps=pulsemaps, - features=features, - truth=truth, - selection=selection, - node_truth=node_truth, - truth_table=truth_table, - node_truth_table=node_truth_table, - string_selection=string_selection, - loss_weight_table=loss_weight_table, - loss_weight_column=loss_weight_column, - index_column=index_column, - graph_definition=graph_definition, - ) - - # adds custom labels to dataset - if isinstance(labels, dict): - for label in labels.keys(): - dataset.add_label(key=label, fn=labels[label]) - - dataloader = DataLoader( - dataset, - batch_size=batch_size, - shuffle=shuffle, - num_workers=num_workers, - collate_fn=collate_fn, - persistent_workers=persistent_workers, - prefetch_factor=2, - ) - - return dataloader
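A hedged usage sketch (the database path, pulse map and column names are placeholders; `KNNGraph` and `IceCubeDeepCore` are assumed from `graphnet.models.graphs` and `graphnet.models.detector.icecube`):

from graphnet.models.detector.icecube import IceCubeDeepCore
from graphnet.models.graphs import KNNGraph

graph_definition = KNNGraph(detector=IceCubeDeepCore())
dataloader = make_dataloader(
    db="/path/to/events.db",           # placeholder SQLite database
    pulsemaps="SRTTWOfflinePulsesDC",  # placeholder pulse map name
    graph_definition=graph_definition,
    features=["dom_x", "dom_y", "dom_z", "dom_time"],
    truth=["energy", "azimuth", "zenith"],
    batch_size=256,
    shuffle=True,
)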
- - - -# @TODO: Remove in favour of DataLoader{,.from_dataset_config} -
-[docs]
-def make_train_validation_dataloader(
-    db: str,
-    graph_definition: GraphDefinition,
-    selection: Optional[List[int]],
-    pulsemaps: Union[str, List[str]],
-    features: List[str],
-    truth: List[str],
-    *,
-    batch_size: int,
-    database_indices: Optional[List[int]] = None,
-    seed: int = 42,
-    test_size: float = 0.33,
-    num_workers: int = 10,
-    persistent_workers: bool = True,
-    node_truth: Optional[str] = None,
-    truth_table: str = "truth",
-    node_truth_table: Optional[str] = None,
-    string_selection: Optional[List[int]] = None,
-    loss_weight_column: Optional[str] = None,
-    loss_weight_table: Optional[str] = None,
-    index_column: str = "event_no",
-    labels: Optional[Dict[str, Callable]] = None,
-) -> Tuple[DataLoader, DataLoader]:
-    """Construct train and test `DataLoader` instances."""
-    # Reproducibility
-    rng = np.random.default_rng(seed=seed)
-    # Check(s)
-    if isinstance(pulsemaps, str):
-        pulsemaps = [pulsemaps]
-
-    if selection is None:
-        # If no selection is provided, use all events in dataset.
-        dataset: Dataset
-        if db.endswith(".db"):
-            dataset = SQLiteDataset(
-                path=db,
-                graph_definition=graph_definition,
-                pulsemaps=pulsemaps,
-                features=features,
-                truth=truth,
-                truth_table=truth_table,
-                index_column=index_column,
-            )
-        elif db.endswith(".parquet"):
-            dataset = ParquetDataset(
-                path=db,
-                graph_definition=graph_definition,
-                pulsemaps=pulsemaps,
-                features=features,
-                truth=truth,
-                truth_table=truth_table,
-                index_column=index_column,
-            )
-        else:
-            raise RuntimeError(
-                f"File {db} with format {db.split('.')[-1]} not supported."
-            )
-        selection = dataset._get_all_indices()
-
-    # Perform train/validation split
-    if isinstance(db, list):
-        df_for_shuffle = pd.DataFrame(
-            {"event_no": selection, "db": database_indices}
-        )
-        shuffled_df = df_for_shuffle.sample(
-            frac=1, replace=False, random_state=rng
-        )
-        training_df, validation_df = train_test_split(
-            shuffled_df, test_size=test_size, random_state=seed
-        )
-        training_selection = training_df.values.tolist()
-        validation_selection = validation_df.values.tolist()
-    else:
-        training_selection, validation_selection = train_test_split(
-            selection, test_size=test_size, random_state=seed
-        )
-
-    # Create DataLoaders
-    common_kwargs = dict(
-        db=db,
-        pulsemaps=pulsemaps,
-        features=features,
-        truth=truth,
-        batch_size=batch_size,
-        num_workers=num_workers,
-        persistent_workers=persistent_workers,
-        node_truth=node_truth,
-        truth_table=truth_table,
-        node_truth_table=node_truth_table,
-        string_selection=string_selection,
-        loss_weight_column=loss_weight_column,
-        loss_weight_table=loss_weight_table,
-        index_column=index_column,
-        labels=labels,
-        graph_definition=graph_definition,
-    )
-
-    training_dataloader = make_dataloader(
-        shuffle=True,
-        selection=training_selection,
-        **common_kwargs,  # type: ignore[arg-type]
-    )
-
-    validation_dataloader = make_dataloader(
-        shuffle=False,
-        selection=validation_selection,
-        **common_kwargs,  # type: ignore[arg-type]
-    )
-
-    return (
-        training_dataloader,
-        validation_dataloader,
-    )
- - - -# @TODO: Remove in favour of Model.predict{,_as_dataframe} -
-[docs] -def get_predictions( - trainer: Trainer, - model: Model, - dataloader: DataLoader, - prediction_columns: List[str], - *, - node_level: bool = False, - additional_attributes: Optional[List[str]] = None, -) -> pd.DataFrame: - """Get `model` predictions on `dataloader`.""" - # Gets predictions from model on the events in the dataloader. - # NOTE: dataloader must NOT have shuffle = True! - - # Check(s) - if additional_attributes is None: - additional_attributes = [] - assert isinstance(additional_attributes, list) - - # Set model to inference mode - model.inference() - - # Get predictions - predictions_torch = trainer.predict(model, dataloader) - predictions_list = [ - p[0].detach().cpu().numpy() for p in predictions_torch - ] # Assuming single task - predictions = np.concatenate(predictions_list, axis=0) - try: - assert len(prediction_columns) == predictions.shape[1] - except IndexError: - predictions = predictions.reshape((-1, 1)) - assert len(prediction_columns) == predictions.shape[1] - - # Get additional attributes - attributes: Dict[str, List[np.ndarray]] = OrderedDict( - [(attr, []) for attr in additional_attributes] - ) - for batch in dataloader: - for attr in attributes: - attribute = batch[attr].detach().cpu().numpy() - if node_level: - if attr == "event_no": - attribute = np.repeat( - attribute, batch["n_pulses"].detach().cpu().numpy() - ) - attributes[attr].extend(attribute) - - data = np.concatenate( - [predictions] - + [ - np.asarray(values)[:, np.newaxis] for values in attributes.values() - ], - axis=1, - ) - - results = pd.DataFrame( - data, columns=prediction_columns + additional_attributes - ) - return results
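A usage sketch (assuming a trained graphnet `Model` in `model` and an unshuffled `dataloader`; the column names are placeholders matching the task's prediction labels):

from pytorch_lightning import Trainer

# NOTE: the dataloader must not shuffle, so that predictions line up with
# the additional attributes read from the same loader.
results = get_predictions(
    trainer=Trainer(),
    model=model,
    dataloader=dataloader,
    prediction_columns=["energy_pred"],
    additional_attributes=["event_no", "energy"],
)
print(results.head())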
- - - -# @TODO: Remove -
-[docs] -def save_results( - db: str, tag: str, results: pd.DataFrame, archive: str, model: Model -) -> None: - """Save trained model and prediction `results` in `db`.""" - db_name = db.split("/")[-1].split(".")[0] - path = archive + "/" + db_name + "/" + tag - os.makedirs(path, exist_ok=True) - results.to_csv(path + "/results.csv") - model.save_state_dict(path + "/" + tag + "_state_dict.pth") - model.save(path + "/" + tag + "_model.pth") - Logger().info("Results saved at: \n %s" % path)
- -
\ No newline at end of file
diff --git a/_modules/index.html b/_modules/index.html
index 0a020631b..489cbbceb 100644
--- a/_modules/index.html
+++ b/_modules/index.html
@@ -323,10 +323,6 @@

All modules for which code is available

@@ -564,36 +458,7 @@
@@ -603,202 +468,8 @@
-
-

dataset

-

Base Dataset class(es) used in GraphNeT.

-
-
-exception graphnet.data.dataset.dataset.ColumnMissingException[source]
-

Bases: Exception

-

Exception to indicate a missing column in a dataset.

-
-
-
-graphnet.data.dataset.dataset.load_module(class_name)[source]
-

Load graphnet module from string name.

-
-
Parameters:
-

class_name (str) – name of class

-
-
Return type:
-

Type

-
-
Returns:
-

graphnet module.

-
-
-
-
-
-graphnet.data.dataset.dataset.parse_graph_definition(cfg)[source]
-

Construct GraphDefinition from DatasetConfig.

-
-
Return type:
-

GraphDefinition

-
-
Parameters:
-

cfg (dict) –

-
-
-
-
-
-class graphnet.data.dataset.dataset.Dataset(*args, **kwargs)[source]
-

Bases: Logger, Configurable, Dataset, ABC

-

Base Dataset class for reading from any intermediate file format.

-

Construct Dataset.

-
-
Parameters:
-
    -
  • path (Union[str, List[str]]) – Path to the file(s) from which this Dataset should read.

  • -
  • pulsemaps (Union[str, List[str]]) – Name(s) of the pulse map series that should be used to -construct the nodes on the individual graph objects, and their -features. Multiple pulse series maps can be used, e.g., when -different DOM types are stored in different maps.

  • -
  • features (List[str]) – List of columns in the input files that should be used as -node features on the graph objects.

  • -
• truth (List[str]) – List of event-level columns in the input files that should be added as attributes on the graph objects.

  • -
• node_truth (Optional[List[str]], default: None) – List of node-level columns in the input files that should be added as attributes on the graph objects.

  • -
• index_column (str, default: 'event_no') – Name of the column in the input files that contains unique indices to identify and map events across tables.

  • -
  • truth_table (str, default: 'truth') – Name of the table containing event-level truth -information.

  • -
  • node_truth_table (Optional[str], default: None) – Name of the table containing node-level truth -information.

  • -
  • string_selection (Optional[List[int]], default: None) – Subset of strings for which data should be read -and used to construct graph objects. Defaults to None, meaning -all strings for which data exists are used.

  • -
• selection (Union[str, List[int], List[List[int]], None], default: None) – The events that should be read. This can be given either as a list of indices (in index_column); or a string-based selection used to query the Dataset for events passing the selection. Defaults to None, meaning that all events in the input files are read.

  • -
  • dtype (dtype, default: torch.float32) – Type of the feature tensor on the graph objects returned.

  • -
  • loss_weight_table (Optional[str], default: None) – Name of the table containing per-event loss -weights.

  • -
  • loss_weight_column (Optional[str], default: None) – Name of the column in loss_weight_table -containing per-event loss weights. This is also the name of the -corresponding attribute assigned to the graph object.

  • -
  • loss_weight_default_value (Optional[float], default: None) – Default per-event loss weight. -NOTE: This default value is only applied when -loss_weight_table and loss_weight_column are specified, and -in this case to events with no value in the corresponding -table/column. That is, if no per-event loss weight table/column -is provided, this value is ignored. Defaults to None.

  • -
  • seed (Optional[int], default: None) – Random number generator seed, used for selecting a random -subset of events when resolving a string-based selection (e.g., -“10000 random events ~ event_no % 5 > 0” or “20% random -events ~ event_no % 5 > 0”).

  • -
  • graph_definition (GraphDefinition) – Method that defines the graph representation.

  • -
  • args (Any) –

  • -
  • kwargs (Any) –

  • -
-
-
Return type:
-

object

-
-
-
-
-classmethod from_config(source)[source]
-

Construct Dataset instance from source configuration.

-
-
Return type:
-

Union[Dataset, EnsembleDataset, Dict[str, Dataset], Dict[str, EnsembleDataset]]

-
-
Parameters:
-

source (DatasetConfig | str) –

-
-
-
-
-
-classmethod concatenate(datasets)[source]
-

Concatenate multiple `Dataset`s into one instance.

-
-
Return type:
-

EnsembleDataset

-
-
Parameters:
-

datasets (List[Dataset]) –

-
-
-
-
-
-property path: str | List[str]
-

Path to the file(s) from which this Dataset reads.

-
-
-
-property truth_table: str
-

Name of the table containing event-level truth information.

-
-
-
-abstract query_table(table, columns, sequential_index, selection)[source]
-

Query a table at a specific index, optionally with some selection.

-
-
Parameters:
-
    -
  • table (str) – Table to be queried.

  • -
  • columns (Union[List[str], str]) – Columns to read out.

  • -
  • sequential_index (Optional[int], default: None) – Sequentially numbered index -(i.e. in [0,len(self))) of the event to query. This _may_ -differ from the indexation used in self._indices. If no value -is provided, the entire column is returned.

  • -
  • selection (Optional[str], default: None) – Selection to be imposed before reading out data. -Defaults to None.

  • -
-
-
Return type:
-

List[Tuple[Any, ...]]

-
-
Returns:
-

-
List of tuples containing the values in columns. If the table

contains only scalar data for columns, a list of length 1 is -returned

-
-
-

-
-
Raises:
-

ColumnMissingException – If one or more element in columns is not - present in table.

-
-
-
-
-
-add_label(fn, key)[source]
-

Add custom graph label defined using function fn.

-
-
Return type:
-

None

-
-
Parameters:
-
    -
  • fn (Callable[[Data], Any]) –

  • -
  • key (str | None) –

  • -
-
-
-
-
-
-
-class graphnet.data.dataset.dataset.EnsembleDataset(datasets)[source]
-

Bases: ConcatDataset

-

Construct a single dataset from a collection of datasets.

-

Construct a single dataset from a collection of datasets.

-
-
Parameters:
-

datasets (Iterable[Dataset]) – A collection of Datasets
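A brief sketch of combining datasets (assuming `dataset_a` and `dataset_b` are already-constructed graphnet `Dataset` instances):

from graphnet.data.dataset import Dataset, EnsembleDataset

combined = Dataset.concatenate([dataset_a, dataset_b])
assert isinstance(combined, EnsembleDataset)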

-
-
-
+
+

dataset

diff --git a/api/graphnet.data.dataset.html b/api/graphnet.data.dataset.html index 64cab267c..2160301b4 100644 --- a/api/graphnet.data.dataset.html +++ b/api/graphnet.data.dataset.html @@ -467,9 +467,8 @@
-
-

dataset

-

Dataset classes for training in GraphNeT.

+
+

dataset

Subpackages

diff --git a/api/graphnet.data.dataset.parquet.html b/api/graphnet.data.dataset.parquet.html index 492684fad..280ee7d5d 100644 --- a/api/graphnet.data.dataset.parquet.html +++ b/api/graphnet.data.dataset.parquet.html @@ -475,16 +475,12 @@
-
-

parquet

-

Datasets using parquet backend.

+
+

parquet

Submodules

diff --git a/api/graphnet.data.dataset.parquet.parquet_dataset.html b/api/graphnet.data.dataset.parquet.parquet_dataset.html index d76656ab5..4bd451c80 100644 --- a/api/graphnet.data.dataset.parquet.parquet_dataset.html +++ b/api/graphnet.data.dataset.parquet.parquet_dataset.html @@ -328,36 +328,11 @@ - - @@ -491,18 +466,7 @@
@@ -512,87 +476,8 @@
-
-

parquet_dataset

-

Dataset class(es) for reading from Parquet files.

-
-
-class graphnet.data.dataset.parquet.parquet_dataset.ParquetDataset(*args, **kwargs)[source]
-

Bases: Dataset

-

Pytorch dataset for reading from Parquet files.

-

Construct Dataset.

-
-
Parameters:
-
    -
  • path (Union[str, List[str]]) – Path to the file(s) from which this Dataset should read.

  • -
  • pulsemaps (Union[str, List[str]]) – Name(s) of the pulse map series that should be used to -construct the nodes on the individual graph objects, and their -features. Multiple pulse series maps can be used, e.g., when -different DOM types are stored in different maps.

  • -
  • features (List[str]) – List of columns in the input files that should be used as -node features on the graph objects.

  • -
• truth (List[str]) – List of event-level columns in the input files that should be added as attributes on the graph objects.

  • -
• node_truth (Optional[List[str]], default: None) – List of node-level columns in the input files that should be added as attributes on the graph objects.

  • -
• index_column (str, default: 'event_no') – Name of the column in the input files that contains unique indices to identify and map events across tables.

  • -
  • truth_table (str, default: 'truth') – Name of the table containing event-level truth -information.

  • -
  • node_truth_table (Optional[str], default: None) – Name of the table containing node-level truth -information.

  • -
  • string_selection (Optional[List[int]], default: None) – Subset of strings for which data should be read -and used to construct graph objects. Defaults to None, meaning -all strings for which data exists are used.

  • -
• selection (Union[str, List[int], List[List[int]], None], default: None) – The events that should be read. This can be given either as a list of indices (in index_column); or a string-based selection used to query the Dataset for events passing the selection. Defaults to None, meaning that all events in the input files are read.

  • -
  • dtype (dtype, default: torch.float32) – Type of the feature tensor on the graph objects returned.

  • -
  • loss_weight_table (Optional[str], default: None) – Name of the table containing per-event loss -weights.

  • -
  • loss_weight_column (Optional[str], default: None) – Name of the column in loss_weight_table -containing per-event loss weights. This is also the name of the -corresponding attribute assigned to the graph object.

  • -
  • loss_weight_default_value (Optional[float], default: None) – Default per-event loss weight. -NOTE: This default value is only applied when -loss_weight_table and loss_weight_column are specified, and -in this case to events with no value in the corresponding -table/column. That is, if no per-event loss weight table/column -is provided, this value is ignored. Defaults to None.

  • -
  • seed (Optional[int], default: None) – Random number generator seed, used for selecting a random -subset of events when resolving a string-based selection (e.g., -“10000 random events ~ event_no % 5 > 0” or “20% random -events ~ event_no % 5 > 0”).

  • -
  • graph_definition (GraphDefinition) – Method that defines the graph representation.

  • -
  • args (Any) –

  • -
  • kwargs (Any) –

  • -
-
-
Return type:
-

object

-
-
-
-
-query_table(table, columns, sequential_index, selection)[source]
-

Query table at a specific index, optionally with some selection.

-
-
Return type:
-

List[Tuple[Any, ...]]

-
-
Parameters:
-
    -
  • table (str) –

  • -
  • columns (List[str] | str) –

  • -
  • sequential_index (int | None) –

  • -
  • selection (str | None) –

  • -
-
-
-
-
+
+

parquet_dataset

diff --git a/api/graphnet.data.dataset.sqlite.html b/api/graphnet.data.dataset.sqlite.html index 4111fca68..80b6ea9f8 100644 --- a/api/graphnet.data.dataset.sqlite.html +++ b/api/graphnet.data.dataset.sqlite.html @@ -475,16 +475,12 @@
-
-

sqlite

-

Datasets using SQLite backend.

+
+

sqlite

Submodules

diff --git a/api/graphnet.data.dataset.sqlite.sqlite_dataset.html b/api/graphnet.data.dataset.sqlite.sqlite_dataset.html index 121d22fc0..ef68e5f92 100644 --- a/api/graphnet.data.dataset.sqlite.sqlite_dataset.html +++ b/api/graphnet.data.dataset.sqlite.sqlite_dataset.html @@ -335,36 +335,11 @@ - - @@ -491,18 +466,7 @@
@@ -512,87 +476,8 @@
-
-

sqlite_dataset

-

Dataset class(es) for reading data from SQLite databases.

-
-
-class graphnet.data.dataset.sqlite.sqlite_dataset.SQLiteDataset(*args, **kwargs)[source]
-

Bases: Dataset

-

Pytorch dataset for reading data from SQLite databases.

-

Construct Dataset.

-
-
Parameters:
-
    -
  • path (Union[str, List[str]]) – Path to the file(s) from which this Dataset should read.

  • -
  • pulsemaps (Union[str, List[str]]) – Name(s) of the pulse map series that should be used to -construct the nodes on the individual graph objects, and their -features. Multiple pulse series maps can be used, e.g., when -different DOM types are stored in different maps.

  • -
  • features (List[str]) – List of columns in the input files that should be used as -node features on the graph objects.

  • -
• truth (List[str]) – List of event-level columns in the input files that should be added as attributes on the graph objects.

  • -
• node_truth (Optional[List[str]], default: None) – List of node-level columns in the input files that should be added as attributes on the graph objects.

  • -
• index_column (str, default: 'event_no') – Name of the column in the input files that contains unique indices to identify and map events across tables.

  • -
  • truth_table (str, default: 'truth') – Name of the table containing event-level truth -information.

  • -
  • node_truth_table (Optional[str], default: None) – Name of the table containing node-level truth -information.

  • -
  • string_selection (Optional[List[int]], default: None) – Subset of strings for which data should be read -and used to construct graph objects. Defaults to None, meaning -all strings for which data exists are used.

  • -
• selection (Union[str, List[int], List[List[int]], None], default: None) – The events that should be read. This can be given either as a list of indices (in index_column); or a string-based selection used to query the Dataset for events passing the selection. Defaults to None, meaning that all events in the input files are read.

  • -
  • dtype (dtype, default: torch.float32) – Type of the feature tensor on the graph objects returned.

  • -
  • loss_weight_table (Optional[str], default: None) – Name of the table containing per-event loss -weights.

  • -
  • loss_weight_column (Optional[str], default: None) – Name of the column in loss_weight_table -containing per-event loss weights. This is also the name of the -corresponding attribute assigned to the graph object.

  • -
  • loss_weight_default_value (Optional[float], default: None) – Default per-event loss weight. -NOTE: This default value is only applied when -loss_weight_table and loss_weight_column are specified, and -in this case to events with no value in the corresponding -table/column. That is, if no per-event loss weight table/column -is provided, this value is ignored. Defaults to None.

  • -
  • seed (Optional[int], default: None) – Random number generator seed, used for selecting a random -subset of events when resolving a string-based selection (e.g., -“10000 random events ~ event_no % 5 > 0” or “20% random -events ~ event_no % 5 > 0”).

  • -
  • graph_definition (GraphDefinition) – Method that defines the graph representation.

  • -
  • args (Any) –

  • -
  • kwargs (Any) –

  • -
-
-
Return type:
-

object

-
-
-
-
-query_table(table, columns, sequential_index, selection)[source]
-

Query table at a specific index, optionally with some selection.

-
-
Return type:
-

List[Tuple[Any, ...]]

-
-
Parameters:
-
    -
  • table (str) –

  • -
  • columns (List[str] | str) –

  • -
  • sequential_index (int | None) –

  • -
  • selection (str | None) –

  • -
-
-
-
-
+
+

sqlite_dataset

diff --git a/api/graphnet.data.html b/api/graphnet.data.html index 57b5f245a..fade8c12d 100644 --- a/api/graphnet.data.html +++ b/api/graphnet.data.html @@ -507,16 +507,8 @@
  • DataConverter
  • -
  • dataloader -
  • -
  • pipeline -
  • +
  • dataloader
  • +
  • pipeline
  • diff --git a/api/graphnet.data.pipeline.html b/api/graphnet.data.pipeline.html index 3e14a0e83..36a30804d 100644 --- a/api/graphnet.data.pipeline.html +++ b/api/graphnet.data.pipeline.html @@ -372,25 +372,11 @@ - - @@ -450,14 +436,7 @@
    @@ -467,36 +446,8 @@
    -
    -

    pipeline

    -

    Class(es) used for analysis in PISA.

    -
    -
    -class graphnet.data.pipeline.InSQLitePipeline(module_dict, features, truth, device, retro_table_name, outdir, batch_size, n_workers, pipeline_name)[source]
    -

    Bases: ABC, Logger

    -

    Create a SQLite database for PISA analysis.

    -

    The database will contain truth and GNN predictions and, if available, -RETRO reconstructions.

    -

    Initialise the pipeline.

    -
    -
    Parameters:
    -
      -
    • module_dict (Dict) – A dictionary with GNN modules from GraphNet. E.g. -{‘energy’: gnn_module_for_energy_regression}

    • -
    • features (List[str]) – List of input features for the GNN modules.

    • -
    • truth (List[str]) – List of truth for the GNN ModuleList.

    • -
    • device (device) – The device used for computation.

    • -
• retro_table_name (str, default: 'retro') – Name of the retro table.

    • -
• outdir (Optional[str], default: None) – The directory in which the pipeline database will be stored.

    • -
    • batch_size (int, default: 100) – Batch size for inference.

    • -
    • n_workers (int, default: 10) – Number of workers used in dataloading.

    • -
    • pipeline_name (str, default: 'pipeline') – Name of the pipeline. If such a pipeline already -exists, an error will be prompted to avoid overwriting.

    • -
    -
    -
    -
    +
    +

    pipeline

    diff --git a/api/graphnet.data.utilities.string_selection_resolver.html b/api/graphnet.data.utilities.string_selection_resolver.html index 2be85aed2..39d28f20c 100644 --- a/api/graphnet.data.utilities.string_selection_resolver.html +++ b/api/graphnet.data.utilities.string_selection_resolver.html @@ -552,7 +552,7 @@
    Parameters:
    @@ -427,18 +395,7 @@ @@ -448,94 +405,8 @@
    -
    -

    graphnet_module

    -

    Class(es) for deploying GraphNeT models in icetray as I3Modules.

    -
    -
    -class graphnet.deployment.i3modules.graphnet_module.GraphNeTI3Module(graph_definition, pulsemap, features, pulsemap_extractor, gcd_file)[source]
    -

    Bases: object

    -

    Base I3 Module for GraphNeT.

    -

    Contains methods for extracting pulsemaps, producing graphs and writing to -frames.

    -

    I3Module Constructor.

    -
    -
    Parameters:
    -
      -
    • graph_definition (GraphDefinition) – An instance of GraphDefinition. E.g. KNNGraph.

    • -
    • pulsemap (str) – the pulse map on which the module functions

    • -
• features (List[str]) – the features that are used from the pulse map. E.g. [dom_x, dom_y, dom_z, charge]

    • -
    • pulsemap_extractor (Union[List[I3FeatureExtractor], I3FeatureExtractor]) – The I3FeatureExtractor used to extract the -pulsemap from the I3Frames

    • -
    • gcd_file (str) – Path to the associated gcd-file.

    • -
    -
    -
    -
    -
    -
    -class graphnet.deployment.i3modules.graphnet_module.I3InferenceModule(pulsemap, features, pulsemap_extractor, model_config, state_dict, model_name, gcd_file, prediction_columns)[source]
    -

    Bases: GraphNeTI3Module

    -

    General class for inference on i3 frames.

    -

    General class for inference on I3Frames (physics).

    -
    -
    Parameters:
    -
      -
• pulsemap (str) – the pulsemap that the model is expecting as input.

    • -
    • features (List[str]) – the features of the pulsemap that the model is expecting.

    • -
    • pulsemap_extractor (Union[List[I3FeatureExtractor], I3FeatureExtractor]) – The extractor used to extract the pulsemap.

    • -
    • model_config (Union[ModelConfig, str]) – The ModelConfig (or path to it) that summarizes the -model used for inference.

    • -
    • state_dict (str) – Path to state_dict containing the learned weights.

    • -
    • model_name (str) – The name used for the model. Will help define the -named entry in the I3Frame. E.g. “dynedge”.

    • -
    • gcd_file (str) – path to associated gcd file.

    • -
• prediction_columns (Union[str, List[str], None], default: None) – Column names for the predictions of the model. Will help define the named entry in the I3Frame. E.g. [‘energy_reco’]. Optional.

      -
      -

    • -
    -
    -
    -
    -
    -
    -class graphnet.deployment.i3modules.graphnet_module.I3PulseCleanerModule(pulsemap, features, pulsemap_extractor, model_config, state_dict, model_name, *, gcd_file, threshold, discard_empty_events, prediction_columns)[source]
    -

    Bases: I3InferenceModule

    -

    A specialized module for pulse cleaning.

    -

    It is assumed that the model provided has been trained for this.

    -

    General class for inference on I3Frames (physics).

    -
    -
    Parameters:
    -
      -
• pulsemap (str) – the pulsemap that the model is expecting as input (the one that is being cleaned).

    • -
    • features (List[str]) – the features of the pulsemap that the model is expecting.

    • -
    • pulsemap_extractor (Union[List[I3FeatureExtractor], I3FeatureExtractor]) – The extractor used to extract the pulsemap.

    • -
    • model_config (str) – The ModelConfig (or path to it) that summarizes the -model used for inference.

    • -
    • state_dict (str) – Path to state_dict containing the learned weights.

    • -
    • model_name (str) – The name used for the model. Will help define the named -entry in the I3Frame. E.g. “dynedge”.

    • -
    • gcd_file (str) – path to associated gcd file.

    • -
    • threshold (float, default: 0.7) – the threshold for being considered a positive case. -E.g., predictions >= threshold will be considered -to be signal, all else noise.

    • -
    • discard_empty_events (bool, default: False) – When true, this flag will eliminate events -whose cleaned pulse series are empty. Can be used -to speed up processing especially for noise -simulation, since it will not do any writing or -further calculations.

    • -
• prediction_columns (Union[str, List[str], None], default: None) – Column names for the predictions of the model. Will help define the named entry in the I3Frame. E.g. [‘energy_reco’]. Optional.

    • -
    -
    -
    -
    +
    +

    graphnet_module

    diff --git a/api/graphnet.deployment.i3modules.html b/api/graphnet.deployment.i3modules.html index 975ce8207..66dbbb52c 100644 --- a/api/graphnet.deployment.i3modules.html +++ b/api/graphnet.deployment.i3modules.html @@ -410,12 +410,7 @@

    i3modules

    diff --git a/api/graphnet.models.coarsening.html b/api/graphnet.models.coarsening.html index 4257e142b..c7db8c1b8 100644 --- a/api/graphnet.models.coarsening.html +++ b/api/graphnet.models.coarsening.html @@ -125,7 +125,6 @@ - @@ -366,90 +365,11 @@ - -
  • @@ -516,30 +436,7 @@ @@ -549,134 +446,8 @@
    -
    -

    coarsening

    -

    Class(es) for coarsening operations (i.e., clustering, or local pooling).

    -
    -
    -graphnet.models.coarsening.unbatch_edge_index(edge_index, batch)[source]
    -

    Splits the edge_index according to a batch vector.

    -
    -
    Parameters:
    -
      -
    • edge_index (Tensor) – The edge_index tensor. Must be ordered.

    • -
    • batch (LongTensor) – The batch vector -\(\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N\), which assigns each -node to a specific example. Must be ordered.

    • -
    -
    -
    Return type:
    -

    List[Tensor]

    -
    -
    -
    -
class graphnet.models.coarsening.Coarsening(*args, **kwargs)[source]

    Bases: Model

    Base class for coarsening operations.

    Construct Coarsening.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    reduce_options = {'avg': (<function avg_pool>, <function avg_pool_x>), 'max': (<function max_pool>, <function max_pool_x>), 'min': (<function min_pool>, <function min_pool_x>), 'sum': (<function sum_pool>, <function sum_pool_x>)}

    forward(data)[source]

        Perform coarsening operation.

        Parameters:
          • data (Data | Batch) –

        Return type:
          Union[Data, Batch]
class graphnet.models.coarsening.AttributeCoarsening(*args, **kwargs)[source]

    Bases: Coarsening

    Coarsen pulses based on specified attributes.

    Construct AttributeCoarsening.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

class graphnet.models.coarsening.DOMCoarsening(*args, **kwargs)[source]

    Bases: Coarsening

    Coarsen pulses to DOM-level. Clusters pulses on the same DOM.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

class graphnet.models.coarsening.CustomDOMCoarsening(*args, **kwargs)[source]

    Bases: DOMCoarsening

    Coarsen pulses to DOM-level with additional attributes. Clusters pulses on the same DOM.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

class graphnet.models.coarsening.DOMAndTimeWindowCoarsening(*args, **kwargs)[source]

    Bases: Coarsening

    Coarsen pulses to DOM-level, with additional time-window clustering. Clusters pulses on the same DOM within time_window.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object
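A usage sketch, assuming a pulse-level Data object `data` and taking the `reduce` and `time_window` arguments on faith from the class descriptions above (their exact names may differ between versions):

    from graphnet.models.coarsening import DOMAndTimeWindowCoarsening

    # Hypothetical settings: average pulses on the same DOM within 10 time units.
    coarsening = DOMAndTimeWindowCoarsening(reduce="avg", time_window=10.0)
    coarse_data = coarsening(data)  # one node per (DOM, time-window) cluster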
coarsening

diff --git a/api/graphnet.models.components.html b/api/graphnet.models.components.html
index a46e005c6..c20fad22b 100644
--- a/api/graphnet.models.components.html
+++ b/api/graphnet.models.components.html
components

Components for constructing models.

Submodules

diff --git a/api/graphnet.models.components.layers.html b/api/graphnet.models.components.layers.html
index 06e348db9..55d517e09 100644
--- a/api/graphnet.models.components.layers.html
+++ b/api/graphnet.models.components.layers.html
layers

Class(es) implementing layers to be used in graphnet models.
class graphnet.models.components.layers.DynEdgeConv(nn, aggr, nb_neighbors, features_subset, **kwargs)[source]

    Bases: EdgeConv, LightningModule

    Dynamical edge convolution layer.

    Construct DynEdgeConv.

    Parameters:
      • nn (Callable) – the MLP/torch.Module to be used within the EdgeConv.
      • aggr (str, default: 'max') – aggregation method to be used with EdgeConv.
      • nb_neighbors (int, default: 8) – number of neighbours to be clustered after the EdgeConv operation.
      • features_subset (Union[Sequence[int], slice, None], default: None) – subset of features in Data.x that should be used when dynamically performing the new graph clustering after the EdgeConv operation. Defaults to all features.
      • **kwargs (Any) – additional features to be passed to EdgeConv.

    forward(x, edge_index, batch)[source]

        Forward pass.

        Parameters:
          • x (Tensor) –
          • edge_index (Tensor | SparseTensor) –
          • batch (Tensor | None) –

        Return type:
          Tensor
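A construction sketch with toy tensors. The MLP shape follows the usual EdgeConv convention of taking concatenated edge features (2 × node-feature dimension); the feature count of 7 is an arbitrary illustration:

    import torch
    from graphnet.models.components.layers import DynEdgeConv

    # Hypothetical two-layer MLP mapping 2 * 7 edge features to 16 dimensions.
    mlp = torch.nn.Sequential(
        torch.nn.Linear(2 * 7, 16),
        torch.nn.ReLU(),
        torch.nn.Linear(16, 16),
    )
    conv = DynEdgeConv(mlp, aggr="max", nb_neighbors=8, features_subset=slice(0, 3))

    x = torch.randn(10, 7)                      # 10 nodes, 7 features each
    edge_index = torch.randint(0, 10, (2, 20))  # toy connectivity
    batch = torch.zeros(10, dtype=torch.long)   # a single graph
    out = conv(x, edge_index, batch)            # updated node representations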
class graphnet.models.components.layers.EdgeConvTito(nn, aggr, **kwargs)[source]

    Bases: MessagePassing, LightningModule

    Implementation of the EdgeConvTito layer used in the TITO solution for the 'IceCube - Neutrinos in Deep Ice' Kaggle competition.

    Construct EdgeConvTito.

    Parameters:
      • nn (Callable) – the MLP/torch.Module to be used within the EdgeConvTito.
      • aggr (str, default: 'max') – aggregation method to be used with EdgeConvTito.
      • **kwargs (Any) – additional features to be passed to EdgeConvTito.

    reset_parameters()[source]

        Reset all learnable parameters of the module.

        Return type:
          None

    forward(x, edge_index)[source]

        Forward pass.

        Parameters:
          • x (Tensor | Tuple[Tensor, Tensor]) –
          • edge_index (Tensor | SparseTensor) –

        Return type:
          Tensor

    message(x_i, x_j)[source]

        EdgeConvTito message passing.

        Parameters:
          • x_i (Tensor) –
          • x_j (Tensor) –

        Return type:
          Tensor
class graphnet.models.components.layers.DynTrans(layer_sizes, aggr, features_subset, n_head, **kwargs)[source]

    Bases: EdgeConvTito, LightningModule

    Implementation of the dynTrans1 layer used in the TITO solution for the 'IceCube - Neutrinos in Deep Ice' Kaggle competition.

    Construct DynTrans.

    Parameters:
      • nn – the MLP/torch.Module to be used within the DynTrans.
      • layer_sizes (Optional[List[int]], default: None) – list of layer sizes to be used in DynTrans.
      • aggr (str, default: 'max') – aggregation method to be used with DynTrans.
      • features_subset (Union[Sequence[int], slice, None], default: None) – subset of features in Data.x that should be used when dynamically performing the new graph clustering after the EdgeConv operation. Defaults to all features.
      • n_head (int, default: 8) – number of heads to be used in the multi-head attention models.
      • **kwargs (Any) – additional features to be passed to DynTrans.

    forward(x, edge_index, batch)[source]

        Forward pass.

        Parameters:
          • x (Tensor) –
          • edge_index (Tensor | SparseTensor) –
          • batch (Tensor | None) –

        Return type:
          Tensor
layers

diff --git a/api/graphnet.models.components.pool.html b/api/graphnet.models.components.pool.html
index 16bcb18ac..8e89e81a0 100644
--- a/api/graphnet.models.components.pool.html
+++ b/api/graphnet.models.components.pool.html
pool

Functions for performing pooling/clustering/coarsening.
graphnet.models.components.pool.min_pool(cluster, data, transform)[source]

    Perform min-pooling of Data. Like max_pool, just negating `data.x`.

    Parameters:
      • cluster (LongTensor) –
      • data (Data) –
      • transform (Any | None) –

    Return type:
      Data

graphnet.models.components.pool.min_pool_x(cluster, x, batch, size)[source]

    Perform min-pooling of Tensor. Like max_pool_x, just negating `x`.

    Parameters:
      • cluster (LongTensor) –
      • x (Tensor) –
      • batch (LongTensor) –
      • size (int | None) –

    Return type:
      Tensor
graphnet.models.components.pool.sum_pool_and_distribute(tensor, cluster_index, batch)[source]

    Sum-pool values and distribute the result to the individual nodes.

    Parameters:
      • tensor (Tensor) –
      • cluster_index (LongTensor) –
      • batch (LongTensor | None) –

    Return type:
      Tensor

graphnet.models.components.pool.group_by(data, keys)[source]

    Group nodes in data that have identical values of keys.

    This grouping is done within each event in the case of batching. This allows for, e.g., assigning the same index to all pulses on the same PMT or DOM in the same event. This can be used for coarsening graphs, e.g., from pulse-level to DOM-level, by aggregating features across each group returned by this method.

    Parameters:
      • data (Data | Batch) –
      • keys (List[str]) –

    Return type:
      LongTensor

    Example

      Given:
        data.f1 = [1,1,2,2,2]
        data.f2 = [6,7,7,7,8]

      Calls:
        group_by(data, ['f1']) -> [0, 0, 1, 1, 1]
        group_by(data, ['f2']) -> [0, 1, 1, 1, 2]
        group_by(data, ['f1', 'f2']) -> [0, 1, 2, 2, 3]
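The same example as a runnable sketch (the Data attribute setup here is illustrative; the exact requirements on the Data object may vary):

    import torch
    from torch_geometric.data import Data
    from graphnet.models.components.pool import group_by

    data = Data()
    data.f1 = torch.tensor([1, 1, 2, 2, 2])
    data.f2 = torch.tensor([6, 7, 7, 7, 8])
    data.batch = torch.zeros(5, dtype=torch.long)  # a single event

    index = group_by(data, ["f1", "f2"])
    # Expected, per the example above: tensor([0, 1, 2, 2, 3])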
graphnet.models.components.pool.group_pulses_to_dom(data)[source]

    Group pulses on the same DOM, using DOM and string number.

    Parameters:
      data (Data) –

    Return type:
      Data

graphnet.models.components.pool.group_pulses_to_pmt(data)[source]

    Group pulses on the same PMT, using PMT, DOM, and string number.

    Parameters:
      data (Data) –

    Return type:
      Data
graphnet.models.components.pool.sum_pool_x(cluster, x, batch, size)[source]

    Sum-pool node features according to the clustering defined in cluster.

    Parameters:
      • cluster (LongTensor) – cluster vector \(\mathbf{c} \in \{0, \ldots, N-1\}^N\), which assigns each node to a specific cluster.
      • x (Tensor) – node feature matrix \(\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}\).
      • batch (LongTensor) – batch vector \(\mathbf{b} \in \{0, \ldots, B-1\}^N\), which assigns each node to a specific example.
      • size (Optional[int], default: None) – the maximum number of clusters in a single example. This property is useful to obtain a batch-wise dense representation, e.g. for applying FC layers, but should only be used if the maximum number of clusters per example is known in advance.

    Return type:
      Tensor

graphnet.models.components.pool.std_pool_x(cluster, x, batch, size)[source]

    Std-pool node features according to the clustering defined in cluster.

    Parameters:
      • cluster (LongTensor) – cluster vector \(\mathbf{c} \in \{0, \ldots, N-1\}^N\), which assigns each node to a specific cluster.
      • x (Tensor) – node feature matrix \(\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}\).
      • batch (LongTensor) – batch vector \(\mathbf{b} \in \{0, \ldots, B-1\}^N\), which assigns each node to a specific example.
      • size (Optional[int], default: None) – the maximum number of clusters in a single example. This property is useful to obtain a batch-wise dense representation, e.g. for applying FC layers, but should only be used if the maximum number of clusters per example is known in advance.

    Return type:
      Tensor
graphnet.models.components.pool.sum_pool(cluster, data, transform)[source]

    Pool and coarsen a graph according to the clustering defined in cluster.

    All nodes within the same cluster will be represented as one node. Final node features are defined by the sum of the features of all nodes within the same cluster, node positions are averaged, and edge indices are defined to be the union of the edge indices of all nodes within the same cluster.

    Parameters:
      • cluster (LongTensor) – cluster vector \(\mathbf{c} \in \{0, \ldots, N-1\}^N\), which assigns each node to a specific cluster.
      • data (Data) – graph data object.
      • transform (Optional[Callable], default: None) – a function/transform that takes in the coarsened and pooled torch_geometric.data.Data object and returns a transformed version.

    Return type:
      Data

graphnet.models.components.pool.std_pool(cluster, data, transform)[source]

    Pool and coarsen a graph according to the clustering defined in cluster.

    All nodes within the same cluster will be represented as one node. Final node features are defined by the std of the features of all nodes within the same cluster, node positions are averaged, and edge indices are defined to be the union of the edge indices of all nodes within the same cluster.

    Parameters:
      • cluster (LongTensor) – cluster vector \(\mathbf{c} \in \{0, \ldots, N-1\}^N\), which assigns each node to a specific cluster.
      • data (Data) – graph data object.
      • transform (Optional[Callable], default: None) – a function/transform that takes in the coarsened and pooled torch_geometric.data.Data object and returns a transformed version.

    Return type:
      Data
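A toy sketch of cluster-wise pooling (shapes and values are illustrative; the exact return structure when size is None may differ between versions, so size is given explicitly here):

    import torch
    from graphnet.models.components.pool import sum_pool_x

    cluster = torch.tensor([0, 0, 1, 1])            # two clusters of two nodes each
    x = torch.tensor([[1.0], [2.0], [3.0], [4.0]])  # node feature matrix
    batch = torch.zeros(4, dtype=torch.long)        # a single example

    pooled = sum_pool_x(cluster, x, batch, size=2)
    # Features summed per cluster: [[3.0], [7.0]]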
pool

diff --git a/api/graphnet.models.detector.detector.html b/api/graphnet.models.detector.detector.html
index 1ba47c47f..931b41143 100644
--- a/api/graphnet.models.detector.detector.html
+++ b/api/graphnet.models.detector.detector.html
detector

Base detector-specific Model class(es).
class graphnet.models.detector.detector.Detector(*args, **kwargs)[source]

    Bases: Model

    Base class for all detector-specific read-ins in graphnet.

    Construct Detector.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    abstract feature_map()[source]

        List of features used/assumed by inheriting Detector objects.

        Return type:
          Dict[str, Callable]

    forward(node_features, node_feature_names)[source]

        Pre-process graph Data features and build graph adjacency.

        Parameters:
          • node_features (tensor) –
          • node_feature_names (List[str]) –

        Return type:
          Data
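A sketch of a custom detector subclass. The class name, column names, and scaling constants are hypothetical, purely to illustrate the feature_map contract (one standardisation function per input column):

    from typing import Callable, Dict

    from graphnet.models.detector.detector import Detector

    class MyPrototypeDetector(Detector):  # hypothetical detector
        """Illustrative read-in that standardises four input columns."""

        def feature_map(self) -> Dict[str, Callable]:
            # Each callable rescales one dimension of the input data to O(1).
            return {
                "dom_x": lambda x: x / 500.0,
                "dom_y": lambda x: x / 500.0,
                "dom_z": lambda x: x / 500.0,
                "dom_time": lambda t: (t - 1.0e4) / 3.0e4,
            }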
detector

diff --git a/api/graphnet.models.detector.html b/api/graphnet.models.detector.html
index d42b5269f..d45e3c1e4 100644
--- a/api/graphnet.models.detector.html
+++ b/api/graphnet.models.detector.html

detector

Detector-specific modules, for data ingestion and standardisation.

Submodules

diff --git a/api/graphnet.models.detector.icecube.html b/api/graphnet.models.detector.icecube.html
index 11d972fa9..de64dfeb1 100644
--- a/api/graphnet.models.detector.icecube.html
+++ b/api/graphnet.models.detector.icecube.html
icecube

IceCube-specific Detector class(es).
class graphnet.models.detector.icecube.IceCube86(*args, **kwargs)[source]

    Bases: Detector

    Detector class for IceCube-86.

    Construct Detector.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    feature_map()[source]

        Map standardization functions to each dimension of input data.

        Return type:
          Dict[str, Callable]

class graphnet.models.detector.icecube.IceCubeKaggle(*args, **kwargs)[source]

    Bases: Detector

    Detector class for the Kaggle competition.

    Construct Detector.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    feature_map()[source]

        Map standardization functions to each dimension of input data.

        Return type:
          Dict[str, Callable]

class graphnet.models.detector.icecube.IceCubeDeepCore(*args, **kwargs)[source]

    Bases: Detector

    Detector class for IceCube-DeepCore.

    Construct Detector.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    feature_map()[source]

        Map standardization functions to each dimension of input data.

        Return type:
          Dict[str, Callable]

class graphnet.models.detector.icecube.IceCubeUpgrade(*args, **kwargs)[source]

    Bases: Detector

    Detector class for IceCube-Upgrade.

    Construct Detector.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    feature_map()[source]

        Map standardization functions to each dimension of input data.

        Return type:
          Dict[str, Callable]
icecube

diff --git a/api/graphnet.models.detector.prometheus.html b/api/graphnet.models.detector.prometheus.html
index 8aa25084d..78e40c97d 100644
--- a/api/graphnet.models.detector.prometheus.html
+++ b/api/graphnet.models.detector.prometheus.html

prometheus

Prometheus-specific Detector class(es).
class graphnet.models.detector.prometheus.Prometheus(*args, **kwargs)[source]

    Bases: Detector

    Detector class for the Prometheus prototype.

    Construct Detector.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    feature_map()[source]

        Map standardization functions to each dimension.

        Return type:
          Dict[str, Callable]
prometheus

diff --git a/api/graphnet.models.gnn.convnet.html b/api/graphnet.models.gnn.convnet.html
index 9ba1f86af..b0041fc67 100644
--- a/api/graphnet.models.gnn.convnet.html
+++ b/api/graphnet.models.gnn.convnet.html

convnet

Implementation of the ConvNet GNN model architecture.

Author: Martin Ha Minh
class graphnet.models.gnn.convnet.ConvNet(*args, **kwargs)[source]

    Bases: GNN

    ConvNet (convolutional network) model.

    Construct ConvNet.

    Parameters:
      • nb_inputs (int) – number of input features, i.e. dimension of the input layer.
      • nb_outputs (int) – number of prediction labels, i.e. dimension of the output layer.
      • nb_intermediate (int, default: 128) – number of nodes in intermediate layer(s).
      • dropout_ratio (float, default: 0.3) – fraction of nodes to drop.
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    forward(data)[source]

        Apply learnable forward pass.

        Parameters:
          • data (Data) –

        Return type:
          Tensor
convnet

diff --git a/api/graphnet.models.gnn.dynedge.html b/api/graphnet.models.gnn.dynedge.html
index 836c517ca..9401def0a 100644
--- a/api/graphnet.models.gnn.dynedge.html
+++ b/api/graphnet.models.gnn.dynedge.html

dynedge

Implementation of the DynEdge GNN model architecture.
class graphnet.models.gnn.dynedge.DynEdge(*args, **kwargs)[source]

    Bases: GNN

    DynEdge (dynamical edge convolutional) model.

    Construct DynEdge.

    Parameters:
      • nb_inputs (int) – number of input features on each node.
      • nb_neighbours (int, default: 8) – number of neighbours to be used in the k-nearest-neighbour clustering which is performed after each (dynamical) edge convolution.
      • features_subset (Union[List[int], slice, None], default: None) – the subset of latent features on each node that are used as metric dimensions when performing the k-nearest-neighbours clustering. Defaults to [0, 1, 2].
      • dynedge_layer_sizes (Optional[List[Tuple[int, ...]]], default: None) – the layer sizes, or latent feature dimensions, used in the DynEdgeConv layers. Each entry in dynedge_layer_sizes corresponds to a single DynEdgeConv layer; the integers in the corresponding tuple correspond to the layer sizes in the multi-layer perceptron (MLP) that is applied within each DynEdgeConv layer. That is, a list of size-two tuples means that all DynEdgeConv layers contain a two-layer MLP. Defaults to [(128, 256), (336, 256), (336, 256), (336, 256)].
      • post_processing_layer_sizes (Optional[List[int]], default: None) – hidden layer sizes in the MLP following the skip-concatenation of the outputs of each DynEdgeConv layer. Defaults to [336, 256].
      • readout_layer_sizes (Optional[List[int]], default: None) – hidden layer sizes in the MLP following the post-processing _and_ optional global pooling. As this is the last layer(s) in the model, the last layer in the read-out yields the output of the DynEdge model. Defaults to [128,].
      • global_pooling_schemes (Union[str, List[str], None], default: None) – the list of global pooling schemes to use. Options are: "min", "max", "mean", and "sum".
      • add_global_variables_after_pooling (bool, default: False) – whether to add global variables after global pooling. The alternative is to add (distribute) them to the individual nodes before any convolutional operations.
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    forward(data)[source]

        Apply learnable forward pass.

        Parameters:
          • data (Data) –

        Return type:
          Tensor
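A construction sketch using the documented defaults (the input dimensionality of 7 is an arbitrary illustration):

    from graphnet.models.gnn import DynEdge

    gnn = DynEdge(
        nb_inputs=7,  # e.g. one column per pulse attribute
        global_pooling_schemes=["min", "max", "mean", "sum"],
    )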
dynedge

diff --git a/api/graphnet.models.gnn.dynedge_jinst.html b/api/graphnet.models.gnn.dynedge_jinst.html
index 17e28429c..fe9562a2c 100644
--- a/api/graphnet.models.gnn.dynedge_jinst.html
+++ b/api/graphnet.models.gnn.dynedge_jinst.html

dynedge_jinst

Implementation of the exact DynEdge architecture used in [2209.03042].

Author: Rasmus Oersoe
class graphnet.models.gnn.dynedge_jinst.DynEdgeJINST(*args, **kwargs)[source]

    Bases: GNN

    DynEdge (dynamical edge convolutional) model used in [2209.03042].

    Construct DynEdgeJINST.

    Parameters:
      • nb_inputs (int) – number of input features.
      • nb_outputs – number of output features.
      • layer_size_scale (int, default: 4) – integer that scales the size of hidden layers.
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    forward(data)[source]

        Apply learnable forward pass.

        Parameters:
          • data (Data) –

        Return type:
          Tensor
dynedge_jinst

diff --git a/api/graphnet.models.gnn.dynedge_kaggle_tito.html b/api/graphnet.models.gnn.dynedge_kaggle_tito.html
index 82027f88a..2dc9b22c0 100644
--- a/api/graphnet.models.gnn.dynedge_kaggle_tito.html
+++ b/api/graphnet.models.gnn.dynedge_kaggle_tito.html

dynedge_kaggle_tito

Implementation of the DynEdge architecture used in the 'IceCube - Neutrinos in Deep Ice' Kaggle competition ("Reconstruct the direction of neutrinos from the Universe to the South Pole"). Solution by TITO.
class graphnet.models.gnn.dynedge_kaggle_tito.DynEdgeTITO(*args, **kwargs)[source]

    Bases: GNN

    DynEdge (dynamical edge convolutional) model.

    Construct DynEdge.

    Parameters:
      • nb_inputs (int) – number of input features on each node.
      • features_subset (slice, default: slice(0, 4, None)) – the subset of latent features on each node that are used as metric dimensions when performing the k-nearest-neighbours clustering. Defaults to [0, 1, 2, 3].
      • dyntrans_layer_sizes (Optional[List[Tuple[int, ...]]], default: None) – the layer sizes, or latent feature dimensions, used in the DynTrans layer.
      • global_pooling_schemes (List[str], default: ['max']) – the list of global pooling schemes to use. Options are: "min", "max", "mean", and "sum".
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    forward(data)[source]

        Apply learnable forward pass.

        Parameters:
          • data (Data) –

        Return type:
          Tensor
dynedge_kaggle_tito

diff --git a/api/graphnet.models.gnn.gnn.html b/api/graphnet.models.gnn.gnn.html
index 537c08b73..fc1e35c49 100644
--- a/api/graphnet.models.gnn.gnn.html
+++ b/api/graphnet.models.gnn.gnn.html

gnn

Base GNN-specific Model class(es).
class graphnet.models.gnn.gnn.GNN(*args, **kwargs)[source]

    Bases: Model

    Base class for all core GNN models in graphnet.

    Construct GNN.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    property nb_inputs: int

        Return number of input features.

    property nb_outputs: int

        Return number of output features.

    abstract forward(data)[source]

        Apply learnable forward pass in model.

        Parameters:
          • data (Data) –

        Return type:
          Tensor
gnn

diff --git a/api/graphnet.models.gnn.html b/api/graphnet.models.gnn.html
index be8f47a7f..9ca582360 100644
--- a/api/graphnet.models.gnn.html
+++ b/api/graphnet.models.gnn.html

gnn

GNN-specific modules, for performing the main learnable operations.

Submodules

diff --git a/api/graphnet.models.graphs.edges.edges.html b/api/graphnet.models.graphs.edges.edges.html
index 01668c62f..2b12e8c1d 100644
--- a/api/graphnet.models.graphs.edges.edges.html
+++ b/api/graphnet.models.graphs.edges.edges.html

edges

Class(es) for building/connecting graphs.
class graphnet.models.graphs.edges.edges.EdgeDefinition(*args, **kwargs)[source]

    Bases: Model

    Base class for graph building.

    Construct Logger.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    forward(graph)[source]

        Construct edges based on a problem-specific implementation of `_construct_edges`.

        Parameters:
          graph (Data) – a graph without edges

        Returns:
          a graph with edges

        Return type:
          graph
class graphnet.models.graphs.edges.edges.KNNEdges(*args, **kwargs)[source]

    Bases: EdgeDefinition

    Builds edges from the k-nearest neighbours.

    K-NN edge definition. Will connect nodes together with their `nb_nearest_neighbours` nearest neighbours in the feature space given by `columns`.

    Parameters:
      • nb_nearest_neighbours (int) – number of neighbours.
      • columns (List[int], default: [0, 1, 2]) – node features to use for the distance calculation. Defaults to [0, 1, 2].
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object
class graphnet.models.graphs.edges.edges.RadialEdges(*args, **kwargs)[source]

    Bases: EdgeDefinition

    Builds a graph from a sphere of chosen radius centred at each node.

    Radial edges. Connects each node to other nodes that are within a sphere of radius `r` centered at the node. The feature space of `r` is defined by `columns`.

    Parameters:
      • radius (float) – radius of the sphere.
      • columns (List[int], default: [0, 1, 2]) – columns of the node feature matrix used. Defaults to [0, 1, 2].
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object
class graphnet.models.graphs.edges.edges.EuclideanEdges(*args, **kwargs)[source]

    Bases: EdgeDefinition

    Builds edges according to the Euclidean distance between nodes.

    See https://arxiv.org/pdf/1809.06166.pdf.

    Construct EuclideanEdges.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object
edges

diff --git a/api/graphnet.models.graphs.edges.html b/api/graphnet.models.graphs.edges.html
index e4feb5f18..82aa56cac 100644
--- a/api/graphnet.models.graphs.edges.html
+++ b/api/graphnet.models.graphs.edges.html

edges

Modules for constructing graphs.

`GraphDefinition` defines the nodes and their features, and contains general graph-manipulation code. `EdgeDefinition` defines how edges are drawn between nodes and their features.

Submodules

diff --git a/api/graphnet.models.graphs.graph_definition.html b/api/graphnet.models.graphs.graph_definition.html
index d4b8a4846..5a788f2a4 100644
--- a/api/graphnet.models.graphs.graph_definition.html
+++ b/api/graphnet.models.graphs.graph_definition.html

graph_definition

Modules for defining graphs.

These are self-contained graph definitions that hold all the graph-altering code in graphnet. These modules define what the GNN sees as input and can be passed to dataloaders during training and deployment.
class graphnet.models.graphs.graph_definition.GraphDefinition(*args, **kwargs)[source]

    Bases: Model

    An abstract class to create graph definitions from.

    Construct `GraphDefinition`. The `detector` holds `Detector`-specific code, e.g. scaling/standardization and geometry tables. `node_definition` defines the nodes in the graph. `edge_definition` defines the connectivity of the nodes in the graph.

    Parameters:
      • detector (Detector) – the corresponding `Detector` representing the data.
      • node_definition (NodeDefinition, default: NodesAsPulses()) – definition of nodes. Defaults to NodesAsPulses.
      • edge_definition (Optional[EdgeDefinition], default: None) – definition of edges. Defaults to None.
      • node_feature_names (Optional[List[str]], default: None) – names of node feature columns. Defaults to None.
      • dtype (Optional[dtype], default: torch.float32) – data type used for node features, e.g. `torch.float`.
      • perturbation_dict (Optional[Dict[str, float]], default: None) – dictionary mapping a feature name to a standard deviation according to which the values for this feature should be randomly perturbed. Defaults to None.
      • seed (Union[int, Generator, None], default: None) – seed or Generator used to randomly sample perturbations. Defaults to None.
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    forward(node_features, node_feature_names, truth_dicts, custom_label_functions, loss_weight_column, loss_weight, loss_weight_default_value, data_path)[source]

        Construct graph as `Data` object.

        Parameters:
          • node_features (ndarray) – node features for the graph. Shape `[num_nodes, d]`.
          • node_feature_names (List[str]) – name of each column. Shape `[,d]`.
          • truth_dicts (Optional[List[Dict[str, Any]]], default: None) – dictionary containing truth labels.
          • custom_label_functions (Optional[Dict[str, Callable[..., Any]]], default: None) – custom label functions. See https://github.com/graphnet-team/graphnet/blob/main/GETTING_STARTED.md#adding-custom-truth-labels.
          • loss_weight_column (Optional[str], default: None) – name of the column that holds the loss weight. Defaults to None.
          • loss_weight (Optional[float], default: None) – loss weight associated with the event. Defaults to None.
          • loss_weight_default_value (Optional[float], default: None) – default value for the loss weight. Used in instances where some events have no pre-defined loss weight. Defaults to None.
          • data_path (Optional[str], default: None) – path to the dataset data files. Defaults to None.

        Returns:
          graph

        Return type:
          Data
graph_definition

diff --git a/api/graphnet.models.graphs.graphs.html b/api/graphnet.models.graphs.graphs.html
index 88980e719..e3a8822f4 100644
--- a/api/graphnet.models.graphs.graphs.html
+++ b/api/graphnet.models.graphs.graphs.html

graphs

A module containing different graph representations in GraphNeT.
class graphnet.models.graphs.graphs.KNNGraph(*args, **kwargs)[source]

    Bases: GraphDefinition

    A graph representation where edges are drawn to nearest neighbours.

    Construct k-nn graph representation.

    Parameters:
      • detector (Detector) – detector that represents your data.
      • node_definition (NodeDefinition, default: NodesAsPulses()) – definition of nodes in the graph.
      • node_feature_names (Optional[List[str]], default: None) – name of node features.
      • dtype (Optional[dtype], default: torch.float32) – data type for node features.
      • perturbation_dict (Optional[Dict[str, float]], default: None) – dictionary mapping a feature name to a standard deviation according to which the values for this feature should be randomly perturbed. Defaults to None.
      • seed (Union[int, Generator, None], default: None) – seed or Generator used to randomly sample perturbations. Defaults to None.
      • nb_nearest_neighbours (int, default: 8) – number of edges for each node. Defaults to 8.
      • columns (List[int], default: [0, 1, 2]) – node feature columns used for the distance calculation. Defaults to [0, 1, 2].
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object
graphs

diff --git a/api/graphnet.models.graphs.html b/api/graphnet.models.graphs.html
index fbbc2ff3e..4a8238f3e 100644
--- a/api/graphnet.models.graphs.html
+++ b/api/graphnet.models.graphs.html

graphs

Modules for constructing graphs.

`GraphDefinition` defines the nodes and their features, and contains general graph-manipulation code. `EdgeDefinition` defines how edges are drawn between nodes and their features.

Subpackages

diff --git a/api/graphnet.models.graphs.nodes.html b/api/graphnet.models.graphs.nodes.html
index 2e99388bb..fffc7c0ee 100644
--- a/api/graphnet.models.graphs.nodes.html
+++ b/api/graphnet.models.graphs.nodes.html

nodes

Modules for constructing graphs.

`GraphDefinition` defines the nodes and their features, and contains general graph-manipulation code. `EdgeDefinition` defines how edges are drawn between nodes and their features.

Submodules

diff --git a/api/graphnet.models.graphs.nodes.nodes.html b/api/graphnet.models.graphs.nodes.nodes.html
index f08873a7c..74d97064a 100644
--- a/api/graphnet.models.graphs.nodes.nodes.html
+++ b/api/graphnet.models.graphs.nodes.nodes.html

nodes

Class(es) for building/connecting graphs.
class graphnet.models.graphs.nodes.nodes.NodeDefinition(*args, **kwargs)[source]

    Bases: Model

    Base class for graph building.

    Construct NodeDefinition.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    forward(x)[source]

        Construct nodes from raw node features.

        Parameters:
          x (tensor) – standardized node features with shape `[num_pulses, d]`, where `d` is the number of node features.

        Returns:
          a graph without edges

        Return type:
          graph

    property nb_outputs: int

        Return the number of output features. This is the default, but it may be overridden by specific inheriting classes.

    set_number_of_inputs(node_feature_names)[source]

        Set the number of inputs expected by the node definition.

        Parameters:
          node_feature_names (List[str]) – name of each node feature column.

        Return type:
          None

class graphnet.models.graphs.nodes.nodes.NodesAsPulses(*args, **kwargs)[source]

    Bases: NodeDefinition

    Represent each measured pulse of Cherenkov radiation as a node.

    Construct NodesAsPulses.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object
nodes

diff --git a/api/graphnet.models.html b/api/graphnet.models.html
index d514e1468..3a0158efa 100644
--- a/api/graphnet.models.html
+++ b/api/graphnet.models.html

models

Modules for configuring and building models.

graphnet.models allows for configuring and building complex GNN models using simple, physics-oriented components. This module provides modular components subclassing torch.nn.Module, meaning that users only need to import a few existing, purpose-built components and chain them together to form a complete GNN.

Subpackages

diff --git a/api/graphnet.models.model.html b/api/graphnet.models.model.html
index cee61ff8f..9dd5bde40 100644
--- a/api/graphnet.models.model.html
+++ b/api/graphnet.models.model.html

model

Base class(es) for building models.
class graphnet.models.model.Model(*args, **kwargs)[source]

    Bases: Logger, Configurable, LightningModule, ABC

    Base class for all models in graphnet.

    Construct Logger.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    abstract forward(x)[source]

        Forward pass.

        Parameters:
          • x (Tensor | Data) –

        Return type:
          Union[Tensor, Data]

    fit(train_dataloader, val_dataloader, *, max_epochs, gpus, callbacks, ckpt_path, logger, log_every_n_steps, gradient_clip_val, distribution_strategy, **trainer_kwargs)[source]

        Fit Model using pytorch_lightning.Trainer.

        Parameters:
          • train_dataloader (DataLoader) –
          • val_dataloader (DataLoader | None) –
          • max_epochs (int) –
          • gpus (List[int] | int | None) –
          • callbacks (List[Callback] | None) –
          • ckpt_path (str | None) –
          • logger (Logger | None) –
          • log_every_n_steps (int) –
          • gradient_clip_val (float | None) –
          • distribution_strategy (str | None) –
          • trainer_kwargs (Any) –

        Return type:
          None

    predict(dataloader, gpus, distribution_strategy)[source]

        Return predictions for dataloader. Returns a list of Tensors, one for each model output.

        Parameters:
          • dataloader (DataLoader) –
          • gpus (List[int] | int | None) –
          • distribution_strategy (str | None) –

        Return type:
          List[Tensor]

    predict_as_dataframe(dataloader, prediction_columns, *, additional_attributes, gpus, distribution_strategy)[source]

        Return predictions for dataloader as a DataFrame. Includes additional_attributes as additional columns in the output DataFrame.

        Parameters:
          • dataloader (DataLoader) –
          • prediction_columns (List[str]) –
          • additional_attributes (List[str] | None) –
          • gpus (List[int] | int | None) –
          • distribution_strategy (str | None) –

        Return type:
          DataFrame

    save(path)[source]

        Save entire model to path.

        Parameters:
          • path (str) –

        Return type:
          None

    classmethod load(path)[source]

        Load entire model from path.

        Parameters:
          • path (str) –

        Return type:
          Model

    save_state_dict(path)[source]

        Save model state_dict to path.

        Parameters:
          • path (str) –

        Return type:
          None

    load_state_dict(path, **kargs)[source]

        Load model state_dict from path.

        Parameters:
          • path (str | Dict) –
          • kargs (Any | None) –

        Return type:
          Model

    classmethod from_config(source, trust, load_modules)[source]

        Construct Model instance from source configuration.

        Parameters:
          • source (ModelConfig | str) –
          • trust (bool, default: False) – whether to trust the ModelConfig file enough to eval(…) any lambda function expressions contained.
          • load_modules (Optional[List[str]], default: None) – list of modules used in the definition of the model which, as a consequence, need to be loaded into the global namespace. Defaults to loading torch.

        Raises:
          ValueError – if the ModelConfig contains lambda functions but trust = False.

        Return type:
          Model
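A usage sketch for the persistence and configuration methods above (paths are hypothetical placeholders, and `model` is assumed to be an existing Model instance):

    from graphnet.models import Model

    model.save("model.pth")                   # save the entire model
    restored = Model.load("model.pth")        # load it back

    model.save_state_dict("state_dict.pth")   # save only the learned weights
    model.load_state_dict("state_dict.pth")   # re-apply them

    # Rebuild from a ModelConfig, trusting any lambda expressions it contains.
    rebuilt = Model.from_config("model_config.yml", trust=True)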
model

diff --git a/api/graphnet.models.standard_model.html b/api/graphnet.models.standard_model.html
index 9d0aa3ed8..3bc29a77d 100644
--- a/api/graphnet.models.standard_model.html
+++ b/api/graphnet.models.standard_model.html

standard_model

Standard model class(es).
class graphnet.models.standard_model.StandardModel(*args, **kwargs)[source]

    Bases: Model

    Main class for standard models in graphnet.

    This class chains together the different elements of a complete GNN-based model (detector read-in, GNN architecture, and task-specific read-outs).

    Construct StandardModel.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    property target_labels: List[str]

        Return target labels.

    property prediction_labels: List[str]

        Return prediction labels.

    configure_optimizers()[source]

        Configure the model's optimizer(s).

        Return type:
          Dict[str, Any]

    forward(data)[source]

        Forward pass, chaining model components.

        Parameters:
          • data (Data) –

        Return type:
          List[Union[Tensor, Data]]

    shared_step(batch, batch_idx)[source]

        Perform shared step. Applies the forward pass and the following loss calculation, shared between the training and validation step.

        Parameters:
          • batch (Data) –
          • batch_idx (int) –

        Return type:
          Tensor

    training_step(train_batch, batch_idx)[source]

        Perform training step.

        Parameters:
          • train_batch (Data) –
          • batch_idx (int) –

        Return type:
          Tensor

    validation_step(val_batch, batch_idx)[source]

        Perform validation step.

        Parameters:
          • val_batch (Data) –
          • batch_idx (int) –

        Return type:
          Tensor

    compute_loss(preds, data, verbose)[source]

        Compute and sum losses across tasks.

        Parameters:
          • preds (Tensor) –
          • data (Data) –
          • verbose (bool) –

        Return type:
          Tensor

    inference()[source]

        Activate inference mode.

        Return type:
          None

    train(mode)[source]

        Deactivate inference mode.

        Parameters:
          • mode (bool) –

        Return type:
          Model

    predict(dataloader, gpus, distribution_strategy)[source]

        Return predictions for dataloader.

        Parameters:
          • dataloader (DataLoader) –
          • gpus (List[int] | int | None) –
          • distribution_strategy (str | None) –

        Return type:
          List[Tensor]

    predict_as_dataframe(dataloader, prediction_columns, *, additional_attributes, gpus, distribution_strategy)[source]

        Return predictions for dataloader as a DataFrame. Includes additional_attributes as additional columns in the output DataFrame.

        Parameters:
          • dataloader (DataLoader) –
          • prediction_columns (List[str] | None) –
          • additional_attributes (List[str] | None) –
          • gpus (List[int] | int | None) –
          • distribution_strategy (str | None) –

        Return type:
          DataFrame
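A composition sketch along the lines of the library's training examples. The constructor arguments and the task/loss choices here are plausible but may differ between versions; treat this as illustrative, not definitive:

    import torch
    from graphnet.models import StandardModel
    from graphnet.models.detector.icecube import IceCube86
    from graphnet.models.gnn import DynEdge
    from graphnet.models.graphs import KNNGraph
    from graphnet.models.task.reconstruction import EnergyReconstruction
    from graphnet.training.loss_functions import LogCoshLoss

    # Detector read-in + graph representation.
    graph_definition = KNNGraph(detector=IceCube86())

    # GNN architecture.
    gnn = DynEdge(
        nb_inputs=graph_definition.nb_outputs,
        global_pooling_schemes=["min", "max", "mean", "sum"],
    )

    # Task-specific read-out: predict log10-scaled energy for a stable loss.
    task = EnergyReconstruction(
        hidden_size=gnn.nb_outputs,
        target_labels="energy",
        loss_function=LogCoshLoss(),
        transform_prediction_and_target=torch.log10,
    )

    model = StandardModel(graph_definition=graph_definition, gnn=gnn, tasks=[task])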
standard_model

diff --git a/api/graphnet.models.task.classification.html b/api/graphnet.models.task.classification.html
index 36c17b4fd..6d1271a7a 100644
--- a/api/graphnet.models.task.classification.html
+++ b/api/graphnet.models.task.classification.html

classification

Classification-specific Model class(es).
class graphnet.models.task.classification.MulticlassClassificationTask(*args, **kwargs)[source]

    Bases: IdentityTask

    General task for classifying any number of classes.

    Requires the same number of input features as the number of classes being predicted. Returns the untransformed latent features, which are interpreted as the logits for each class being classified.

    Construct IdentityTask. Returns the nb_outputs as a direct, affine transformation of the last hidden layer.

    Parameters:
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

class graphnet.models.task.classification.BinaryClassificationTask(*args, **kwargs)[source]

    Bases: Task

    Performs binary classification.

    Construct Task.

    Parameters:
      • hidden_size (int) – the number of nodes in the layer feeding into this task, used to construct the affine transformation to the predicted quantity.
      • loss_function (LossFunction) – loss function appropriate to the task.
      • target_labels (Union[str, List[str], None], default: None) – name(s) of the quantity/-ies being predicted, used to extract the target tensor(s) from the Data object in .compute_loss(…).
      • prediction_labels (Union[str, List[str], None], default: None) – the name(s) of each column that is predicted by the model during inference. If not given, the name will automatically be set to target_label + _pred.
      • transform_prediction_and_target (Optional[Callable], default: None) – optional function to transform both the predicted and target tensor before passing them to the loss function. Useful e.g. for having the model predict quantities on a physical scale, but transforming this scale to O(1) for a numerically stable loss computation.
      • transform_target (Optional[Callable], default: None) – optional function to transform only the target tensor before passing it, and the predicted tensor, to the loss function. Useful e.g. for having the model predict a transformed version of the target quantity, e.g. the log10-scaled energy, rather than the physical quantity itself. Used in conjunction with transform_inference to perform the inverse transform on the predicted quantity to recover the physical scale.
      • transform_inference (Optional[Callable], default: None) – optional function to inverse-transform the model prediction to recover a physical scale. Used in conjunction with transform_target.
      • transform_support (Optional[Tuple], default: None) – optional tuple to specify the minimum and maximum of the range of validity for the inverse transforms transform_target and transform_inference in case this is restricted. By default, the invertibility of transform_target is tested on the range [-1e6, 1e6].
      • loss_weight (Optional[str], default: None) – name of the attribute in data containing per-event loss weights.
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    nb_inputs = 1

    default_target_labels = ['target']

    default_prediction_labels = ['target_pred']

class graphnet.models.task.classification.BinaryClassificationTaskLogits(*args, **kwargs)[source]

    Bases: Task

    Performs binary classification from logits.

    Construct Task.

    Parameters:
      • hidden_size (int) – the number of nodes in the layer feeding into this task, used to construct the affine transformation to the predicted quantity.
      • loss_function (LossFunction) – loss function appropriate to the task.
      • target_labels (Union[str, List[str], None], default: None) – name(s) of the quantity/-ies being predicted, used to extract the target tensor(s) from the Data object in .compute_loss(…).
      • prediction_labels (Union[str, List[str], None], default: None) – the name(s) of each column that is predicted by the model during inference. If not given, the name will automatically be set to target_label + _pred.
      • transform_prediction_and_target (Optional[Callable], default: None) – optional function to transform both the predicted and target tensor before passing them to the loss function. Useful e.g. for having the model predict quantities on a physical scale, but transforming this scale to O(1) for a numerically stable loss computation.
      • transform_target (Optional[Callable], default: None) – optional function to transform only the target tensor before passing it, and the predicted tensor, to the loss function. Useful e.g. for having the model predict a transformed version of the target quantity, e.g. the log10-scaled energy, rather than the physical quantity itself. Used in conjunction with transform_inference to perform the inverse transform on the predicted quantity to recover the physical scale.
      • transform_inference (Optional[Callable], default: None) – optional function to inverse-transform the model prediction to recover a physical scale. Used in conjunction with transform_target.
      • transform_support (Optional[Tuple], default: None) – optional tuple to specify the minimum and maximum of the range of validity for the inverse transforms transform_target and transform_inference in case this is restricted. By default, the invertibility of transform_target is tested on the range [-1e6, 1e6].
      • loss_weight (Optional[str], default: None) – name of the attribute in data containing per-event loss weights.
      • args (Any) –
      • kwargs (Any) –

    Return type:
      object

    nb_inputs = 1

    default_target_labels = ['target']

    default_prediction_labels = ['target_pred']
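A construction sketch (the target label is a hypothetical truth column, and `gnn` refers to a GNN instance as in the sketches above):

    from graphnet.models.task.classification import BinaryClassificationTask
    from graphnet.training.loss_functions import BinaryCrossEntropyLoss

    task = BinaryClassificationTask(
        hidden_size=gnn.nb_outputs,   # width of the layer feeding into the task
        target_labels="is_signal",    # hypothetical truth column
        loss_function=BinaryCrossEntropyLoss(),
    )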
    +
    +

    classification

    diff --git a/api/graphnet.models.task.html b/api/graphnet.models.task.html index 0ed5f54b5..021be4813 100644 --- a/api/graphnet.models.task.html +++ b/api/graphnet.models.task.html @@ -467,38 +467,14 @@
    -
    -

    task

    -

    Physics task-specific modules to be used as model “read-outs”.

    +
    +

    task

    Submodules

    diff --git a/api/graphnet.models.task.reconstruction.html b/api/graphnet.models.task.reconstruction.html index d676c53b9..f251a474d 100644 --- a/api/graphnet.models.task.reconstruction.html +++ b/api/graphnet.models.task.reconstruction.html @@ -371,472 +371,11 @@ - -
@@ -919,132 +458,7 @@
@@ -1054,766 +468,8 @@
    -
    -

    reconstruction

    -

    Reconstruction-specific Model class(es).

    -
    -
    -class graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Reconstructs azimuthal angle and associated kappa (1/var).

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['azimuth']
    -
    -
    -
    -default_prediction_labels = ['azimuth_pred', 'azimuth_kappa']
    -
    -
    -
    -nb_inputs = 2
    -
    -
    -
    -
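A usage sketch for the angle-plus-concentration pattern above: the task head emits two numbers per event, so a von Mises-Fisher style loss is the natural pairing. VonMisesFisher2DLoss is an assumed name from graphnet.training.loss_functions; verify it against your installation.

    # Hedged sketch: azimuth + kappa regression (nb_inputs = 2).
    from graphnet.models.task.reconstruction import AzimuthReconstructionWithKappa
    from graphnet.training.loss_functions import VonMisesFisher2DLoss  # assumed

    task = AzimuthReconstructionWithKappa(
        hidden_size=128,
        loss_function=VonMisesFisher2DLoss(),
    )
    # Defaults documented above: target ["azimuth"],
    # predictions ["azimuth_pred", "azimuth_kappa"].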
    -class graphnet.models.task.reconstruction.AzimuthReconstruction(*args, **kwargs)[source]
    -

    Bases: AzimuthReconstructionWithKappa

    -

    Reconstructs azimuthal angle.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['azimuth']
    -
    -
    -
    -default_prediction_labels = ['azimuth_pred']
    -
    -
    -
    -nb_inputs = 2
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.DirectionReconstructionWithKappa(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Reconstructs direction with kappa from the 3D-vMF distribution.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['direction']
    -
    -
    -
    -default_prediction_labels = ['dir_x_pred', 'dir_y_pred', 'dir_z_pred', 'direction_kappa']
    -
    -
    -
    -nb_inputs = 3
    -
    -
    -
    -
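The loss_weight parameter is easiest to see in use here: it names a per-event attribute on the Data object whose values scale each event's contribution to the loss. A hedged sketch; VonMisesFisher3DLoss is an assumed loss name and "event_weight" a hypothetical column.

    from graphnet.models.task.reconstruction import DirectionReconstructionWithKappa
    from graphnet.training.loss_functions import VonMisesFisher3DLoss  # assumed

    task = DirectionReconstructionWithKappa(
        hidden_size=128,
        loss_function=VonMisesFisher3DLoss(),
        loss_weight="event_weight",  # hypothetical per-event weight attribute
    )
    # Predictions: dir_x_pred, dir_y_pred, dir_z_pred, direction_kappa.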
    -class graphnet.models.task.reconstruction.ZenithReconstruction(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Reconstructs zenith angle.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['zenith']
    -
    -
    -
    -default_prediction_labels = ['zenith_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.ZenithReconstructionWithKappa(*args, **kwargs)[source]
    -

    Bases: ZenithReconstruction

    -

    Reconstructs zenith angle and associated kappa (1/var).

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['zenith']
    -
    -
    -
    -default_prediction_labels = ['zenith_pred', 'zenith_kappa']
    -
    -
    -
    -nb_inputs = 2
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.EnergyReconstruction(*args, **kwargs)[source]
    -

    Bases: Task

    -

Reconstructs energy using a stable method.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['energy']
    -
    -
    -
    -default_prediction_labels = ['energy_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    -
    -
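The transform_* hooks in the parameter list above are best illustrated on energy, where values spanning many orders of magnitude make an O(1) internal scale attractive. A hedged sketch; LogCoshLoss is an assumed name from graphnet.training.loss_functions.

    import torch
    from graphnet.models.task.reconstruction import EnergyReconstruction
    from graphnet.training.loss_functions import LogCoshLoss  # assumed

    task = EnergyReconstruction(
        hidden_size=128,
        loss_function=LogCoshLoss(),
        transform_target=lambda e: torch.log10(e),         # loss sees O(1) values
        transform_inference=lambda y: torch.pow(10.0, y),  # invert to physical scale
        transform_support=(1e-2, 1e8),  # restrict the invertibility probe to E > 0,
                                        # since log10 is undefined on the negative
                                        # half of the default [-1e6, 1e6] test range
    )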
    -class graphnet.models.task.reconstruction.EnergyReconstructionWithPower(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Reconstructs energy.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['energy']
    -
    -
    -
    -default_prediction_labels = ['energy_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty(*args, **kwargs)[source]
    -

    Bases: EnergyReconstruction

    -

    Reconstructs energy and associated uncertainty (log(var)).

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['energy']
    -
    -
    -
    -default_prediction_labels = ['energy_pred', 'energy_sigma']
    -
    -
    -
    -nb_inputs = 2
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.VertexReconstruction(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Reconstructs vertex position and time.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['vertex']
    -
    -
    -
    -default_prediction_labels = ['position_x_pred', 'position_y_pred', 'position_z_pred', 'interaction_time_pred']
    -
    -
    -
    -nb_inputs = 4
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.PositionReconstruction(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Reconstructs vertex position.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['position']
    -
    -
    -
    -default_prediction_labels = ['position_x_pred', 'position_y_pred', 'position_z_pred']
    -
    -
    -
    -nb_inputs = 3
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.TimeReconstruction(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Reconstructs time.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['interaction_time']
    -
    -
    -
    -default_prediction_labels = ['interaction_time_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.InelasticityReconstruction(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Reconstructs interaction inelasticity.

    -

    That is, 1-(track energy / hadronic energy).

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['inelasticity']
    -
    -
    -
    -default_prediction_labels = ['inelasticity_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    +
    +

    reconstruction

diff --git a/api/graphnet.models.task.task.html b/api/graphnet.models.task.task.html
index c557bedc1..a433de9f0 100644
--- a/api/graphnet.models.task.task.html
+++ b/api/graphnet.models.task.task.html
@@ -378,128 +378,11 @@
-
-
@@ -575,40 +458,7 @@
@@ -618,160 +468,8 @@
    -
    -

    task

    -

    Base physics task-specific Model class(es).

    -
    -
    -class graphnet.models.task.task.Task(*args, **kwargs)[source]
    -

    Bases: Model

    -

    Base class for all reconstruction and classification tasks.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of nodes in the layer feeding into this -task, used to construct the affine transformation to the -predicted quantity.

    • -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
    • target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used -to extract the target tensor(s) from the Data object in -.compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by -the model during inference. If not given, the name will -automatically be set to target_label + _pred.

    • -
    • transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform -both the predicted and target tensor before passing them to the -loss function. Useful e.g. for having the model predict -quantities on a physical scale, but transforming this scale to -O(1) for a numerically stable loss computation.

    • -
    • transform_target (Optional[Callable], default: None) – Optional function to transform only the target -tensor before passing it, and the predicted tensor, to the loss -function. Useful e.g. for having the model predict a -transformed version of the target quantity, e.g. the log10- -scaled energy, rather than the physical quantity itself. Used -in conjunction with transform_inference to perform the -inverse transform on the predicted quantity to recover the -physical scale.

    • -
    • transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the -model prediction to recover a physical scale. Used in -conjunction with transform_target.

    • -
    • transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum -of the range of validity for the inverse transforms -transform_target and transform_inference in case this is -restricted. By default the invertibility of transform_target -is tested on the range [-1e6, 1e6].

    • -
    • loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event -loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -abstract property nb_inputs: int
    -

    Return number of inputs assumed by task.

    -
    -
    -
    -abstract property default_target_labels: List[str]
    -

    Return default target labels.

    -
    -
    -
    -abstract property default_prediction_labels: List[str]
    -

    Return default prediction labels.

    -
    -
    -
    -forward(x)[source]
    -

    Forward pass.

    -
    -
    Return type:
    -

    Union[Tensor, Data]

    -
    -
    Parameters:
    -

    x (Tensor | Data) –

    -
    -
    -
    -
    -
    -compute_loss(pred, data)[source]
    -

Compute loss of pred w.r.t. target labels in data.

    -
    -
    Return type:
    -

    Tensor

    -
    -
    Parameters:
    -
      -
    • pred (Tensor | Data) –

    • -
    • data (Data) –

    • -
    -
    -
    -
    -
    -
    -inference()[source]
    -

    Activate inference mode.

    -
    -
    Return type:
    -

    None

    -
    -
    -
    -
    -
    -train_eval()[source]
    -

    Deactivate inference mode.

    -
    -
    Return type:
    -

    None

    -
    -
    -
    -
    -
    -
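The abstract surface above (nb_inputs plus the two default-label properties) is what a concrete subclass must supply. A hedged sketch of a custom task; the _forward hook name is inferred from the pattern of the built-in tasks and should be verified against the Task source before use.

    import torch
    from graphnet.models.task.task import Task

    class ScaledScalarTask(Task):  # hypothetical illustration
        """Predict a single scalar squashed into (0, 1)."""

        nb_inputs = 1
        default_target_labels = ["my_target"]           # hypothetical column
        default_prediction_labels = ["my_target_pred"]

        def _forward(self, x: torch.Tensor) -> torch.Tensor:
            # Map the raw affine output of the task head into (0, 1).
            return torch.sigmoid(x)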
    -class graphnet.models.task.task.IdentityTask(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Identity, or trivial, task.

    -

    Construct IdentityTask.

    -

    Return the nb_outputs as a direct, affine transformation of the last -hidden layer.

    -
    -
    Parameters:
    -
      -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -property default_target_labels: List[str]
    -

    Return default target labels.

    -
    -
    -
    -property default_prediction_labels: List[str]
    -

    Return default prediction labels.

    -
    -
    -
    -property nb_inputs: int
    -

    Return number of inputs assumed by task.

    -
    -
    +
    +

    task

diff --git a/api/graphnet.models.utils.html b/api/graphnet.models.utils.html
index 475f25794..4dbcdbf2e 100644
--- a/api/graphnet.models.utils.html
+++ b/api/graphnet.models.utils.html
@@ -386,43 +386,11 @@
-
-
    -
    -

    utils

    -

    Utility functions for graphnet.models.

    -
    -
    -graphnet.models.utils.calculate_xyzt_homophily(x, edge_index, batch)[source]
    -

    Calculate xyzt-homophily from a batch of graphs.

    -

Homophily is a graph scalar quantity that measures the likeness of -variables in nodes. Note that this calculation assumes a specific ordering of the -input features in x.

    -
    -
    Return type:
    -

    Tuple[Tensor, Tensor, Tensor, Tensor]

    -
    -
    Returns:
    -

    Tuple, each element with shape [batch_size,1].

    -
    -
    Parameters:
    -
      -
    • x (Tensor) –

    • -
    • edge_index (LongTensor) –

    • -
    • batch (Batch) –

    • -
    -
    -
    -
    -
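A short usage sketch of the function above. The ordering caveat in the docstring means the first four columns of the node features must be (x, y, z, t); torch_geometric's Batch is already a graphnet dependency.

    from torch_geometric.data import Batch
    from graphnet.models.utils import calculate_xyzt_homophily

    def xyzt_homophily(batch: Batch):
        """Return four [batch_size, 1] tensors: homophily in x, y, z and t."""
        # Assumes columns 0-3 of batch.x are (x, y, z, t), per the docstring.
        return calculate_xyzt_homophily(batch.x, batch.edge_index, batch)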
    -
    -graphnet.models.utils.calculate_distance_matrix(xyz_coords)[source]
    -

    Calculate the matrix of pairwise distances between pulses.

    -
    -
    Parameters:
    -

    xyz_coords (Tensor) – (x,y,z)-coordinates of pulses, of shape [nb_doms, 3].

    -
    -
    Return type:
    -

    Tensor

    -
    -
    Returns:
    -

    Matrix of pairwise distances, of shape [nb_doms, nb_doms]

    -
    -
    -
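A hedged cross-check of the shape contract documented below for calculate_distance_matrix. Assuming the function computes plain Euclidean pairwise distances (consistent with, but not stated by, the docstring), it should agree with torch.cdist:

    import torch
    from graphnet.models.utils import calculate_distance_matrix

    xyz = torch.randn(5, 3)             # five pulses with (x, y, z) coordinates
    d = calculate_distance_matrix(xyz)  # documented shape: [5, 5]
    assert torch.allclose(d, torch.cdist(xyz, xyz), atol=1e-5)  # assumption check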
    -
    -
    -graphnet.models.utils.knn_graph_batch(batch, k, columns)[source]
    -

    Calculate k-nearest-neighbours with individual k for each batch event.

    -
    -
    Parameters:
    -
      -
    • batch (Batch) – Batch of events.

    • -
    • k (List[int]) – A list of k’s.

    • -
    • columns (List[int]) – The columns of Data.x used for computing the distances. E.g., -Data.x[:,[0,1,2]]

    • -
    -
    -
    Return type:
    -

    Batch

    -
    -
    Returns:
    -

    Returns the same batch of events, but with updated edges.

    -
    -
    -
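A usage sketch for the per-event k-NN rebuild documented above. One k is required per event; columns=[0, 1, 2] assumes the first three feature columns are the spatial coordinates.

    from torch_geometric.data import Batch
    from graphnet.models.utils import knn_graph_batch

    def rebuild_edges(batch: Batch, k: int = 8) -> Batch:
        """Recompute each event's edges as a k-NN graph in (x, y, z)."""
        return knn_graph_batch(batch, k=[k] * batch.num_graphs, columns=[0, 1, 2])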
    +
    +

    utils

diff --git a/api/graphnet.training.html b/api/graphnet.training.html
index c9ac2b308..04944187c 100644
--- a/api/graphnet.training.html
+++ b/api/graphnet.training.html
@@ -429,33 +429,9 @@
  • ProgressBar
  • -
  • labels -
  • -
  • loss_functions -
  • -
  • utils -
  • +
  • labels
  • +
  • loss_functions
  • +
  • utils
• weight_fitting
@@ -1466,16 +942,12 @@

    I

  • I3GalacticPlaneHybridRecoExtractor (class in graphnet.data.extractors.i3hybridrecoextractor)
  • I3GenericExtractor (class in graphnet.data.extractors.i3genericextractor) -
  • -
  • I3InferenceModule (class in graphnet.deployment.i3modules.graphnet_module)
  • I3NTMuonLabelExtractor (class in graphnet.data.extractors.i3ntmuonlabelsextractor)
  • I3ParticleExtractor (class in graphnet.data.extractors.i3particleextractor)
  • I3PISAExtractor (class in graphnet.data.extractors.i3pisaextractor) -
  • -
  • I3PulseCleanerModule (class in graphnet.deployment.i3modules.graphnet_module)
  • I3PulseNoiseTruthFlagIceCubeUpgrade (class in graphnet.data.extractors.i3featureextractor)
  • @@ -1488,8 +960,6 @@

    I

  • I3TruthExtractor (class in graphnet.data.extractors.i3truthextractor)
  • I3TUMExtractor (class in graphnet.data.extractors.i3tumextractor) -
  • -
  • IceCube86 (class in graphnet.models.detector.icecube)
  • ICECUBE86 (graphnet.data.constants.FEATURES attribute) @@ -1497,26 +967,10 @@

    I

  • (graphnet.data.constants.TRUTH attribute)
  • -
  • IceCubeDeepCore (class in graphnet.models.detector.icecube) -
  • -
  • key (graphnet.training.labels.Label property) -
  • - -

    L

    @@ -1623,10 +1043,6 @@

    L

    M

    -

    N

    @@ -1874,59 +1194,13 @@

    N

    @@ -1951,48 +1225,22 @@

    P

  • pairwise_shuffle() (in module graphnet.data.utilities.random)
  • ParquetDataConverter (class in graphnet.data.parquet.parquet_dataconverter) -
  • -
  • ParquetDataset (class in graphnet.data.dataset.parquet.parquet_dataset)
  • ParquetToSQLiteConverter (class in graphnet.data.utilities.parquet_to_sqlite)
  • -
  • parse_graph_definition() (in module graphnet.data.dataset.dataset) +
  • path (graphnet.utilities.config.dataset_config.DatasetConfig attribute)
  • -
  • path (graphnet.data.dataset.dataset.Dataset property) - -
  • PiecewiseLinearLR (class in graphnet.training.callbacks)
  • plot_1D_contour() (in module graphnet.pisa.plotting) -
  • -
  • plot_2D_contour() (in module graphnet.pisa.plotting) -
  • -
  • pop_default() (graphnet.utilities.argparse.Options method)
  • -

    Q

    - - -
    -

    R

    - +

    U

    @@ -2175,24 +1359,6 @@

    U

    -

    V

    - - - -
    -

    W

      @@ -2213,18 +1379,6 @@

      W

    -

    Z

    - - - -
    -
diff --git a/objects.inv b/objects.inv
index 15dce94cf086e5602b7ab8ee5819bad45d3cb43e..5e33e3275e92f85d67a5d017300f84393eeae647 100644
GIT binary patch
delta 4055
[zlib/base85-encoded binary delta omitted]
delta 6124
[zlib/base85-encoded binary delta omitted]
diff --git a/py-modindex.html b/py-modindex.html
index 6a97e1d01..9090eb417 100644
--- a/py-modindex.html
+++ b/py-modindex.html
@@ -360,41 +360,6 @@

    Python Module Index

        graphnet.data.dataconverter - - -     - graphnet.data.dataloader - - - -     - graphnet.data.dataset - - - -     - graphnet.data.dataset.dataset - - - -     - graphnet.data.dataset.parquet - - - -     - graphnet.data.dataset.parquet.parquet_dataset - - - -     - graphnet.data.dataset.sqlite - - - -     - graphnet.data.dataset.sqlite.sqlite_dataset -     @@ -490,11 +455,6 @@

    Python Module Index

        graphnet.data.parquet.parquet_dataconverter - - -     - graphnet.data.pipeline -     @@ -535,156 +495,6 @@

    Python Module Index

        graphnet.deployment - - -     - graphnet.deployment.i3modules.graphnet_module - - - -     - graphnet.models - - - -     - graphnet.models.coarsening - - - -     - graphnet.models.components - - - -     - graphnet.models.components.layers - - - -     - graphnet.models.components.pool - - - -     - graphnet.models.detector - - - -     - graphnet.models.detector.detector - - - -     - graphnet.models.detector.icecube - - - -     - graphnet.models.detector.prometheus - - - -     - graphnet.models.gnn - - - -     - graphnet.models.gnn.convnet - - - -     - graphnet.models.gnn.dynedge - - - -     - graphnet.models.gnn.dynedge_jinst - - - -     - graphnet.models.gnn.dynedge_kaggle_tito - - - -     - graphnet.models.gnn.gnn - - - -     - graphnet.models.graphs - - - -     - graphnet.models.graphs.edges - - - -     - graphnet.models.graphs.edges.edges - - - -     - graphnet.models.graphs.graph_definition - - - -     - graphnet.models.graphs.graphs - - - -     - graphnet.models.graphs.nodes - - - -     - graphnet.models.graphs.nodes.nodes - - - -     - graphnet.models.model - - - -     - graphnet.models.standard_model - - - -     - graphnet.models.task - - - -     - graphnet.models.task.classification - - - -     - graphnet.models.task.reconstruction - - - -     - graphnet.models.task.task - - - -     - graphnet.models.utils -     @@ -710,21 +520,6 @@

    Python Module Index

        graphnet.training.callbacks - - -     - graphnet.training.labels - - - -     - graphnet.training.loss_functions - - - -     - graphnet.training.utils -     diff --git a/searchindex.js b/searchindex.js index 1e27dc996..17d49d0af 100644 --- a/searchindex.js +++ b/searchindex.js @@ -1 +1 @@ -Search.setIndex({"docnames": ["about", "api/graphnet", "api/graphnet.constants", "api/graphnet.data", "api/graphnet.data.constants", "api/graphnet.data.dataconverter", "api/graphnet.data.dataloader", "api/graphnet.data.dataset", "api/graphnet.data.dataset.dataset", "api/graphnet.data.dataset.parquet", "api/graphnet.data.dataset.parquet.parquet_dataset", "api/graphnet.data.dataset.sqlite", "api/graphnet.data.dataset.sqlite.sqlite_dataset", "api/graphnet.data.extractors", "api/graphnet.data.extractors.i3extractor", "api/graphnet.data.extractors.i3featureextractor", "api/graphnet.data.extractors.i3genericextractor", "api/graphnet.data.extractors.i3hybridrecoextractor", "api/graphnet.data.extractors.i3ntmuonlabelsextractor", "api/graphnet.data.extractors.i3particleextractor", "api/graphnet.data.extractors.i3pisaextractor", "api/graphnet.data.extractors.i3quesoextractor", "api/graphnet.data.extractors.i3retroextractor", "api/graphnet.data.extractors.i3splinempeextractor", "api/graphnet.data.extractors.i3truthextractor", "api/graphnet.data.extractors.i3tumextractor", "api/graphnet.data.extractors.utilities", "api/graphnet.data.extractors.utilities.collections", "api/graphnet.data.extractors.utilities.frames", "api/graphnet.data.extractors.utilities.types", "api/graphnet.data.parquet", "api/graphnet.data.parquet.parquet_dataconverter", "api/graphnet.data.pipeline", "api/graphnet.data.sqlite", "api/graphnet.data.sqlite.sqlite_dataconverter", "api/graphnet.data.sqlite.sqlite_utilities", "api/graphnet.data.utilities", "api/graphnet.data.utilities.parquet_to_sqlite", "api/graphnet.data.utilities.random", "api/graphnet.data.utilities.string_selection_resolver", "api/graphnet.deployment", "api/graphnet.deployment.i3modules", "api/graphnet.deployment.i3modules.deployer", "api/graphnet.deployment.i3modules.graphnet_module", "api/graphnet.models", "api/graphnet.models.coarsening", "api/graphnet.models.components", "api/graphnet.models.components.layers", "api/graphnet.models.components.pool", "api/graphnet.models.detector", "api/graphnet.models.detector.detector", "api/graphnet.models.detector.icecube", "api/graphnet.models.detector.prometheus", "api/graphnet.models.gnn", "api/graphnet.models.gnn.convnet", "api/graphnet.models.gnn.dynedge", "api/graphnet.models.gnn.dynedge_jinst", "api/graphnet.models.gnn.dynedge_kaggle_tito", "api/graphnet.models.gnn.gnn", "api/graphnet.models.graphs", "api/graphnet.models.graphs.edges", "api/graphnet.models.graphs.edges.edges", "api/graphnet.models.graphs.graph_definition", "api/graphnet.models.graphs.graphs", "api/graphnet.models.graphs.nodes", "api/graphnet.models.graphs.nodes.nodes", "api/graphnet.models.model", "api/graphnet.models.standard_model", "api/graphnet.models.task", "api/graphnet.models.task.classification", "api/graphnet.models.task.reconstruction", "api/graphnet.models.task.task", "api/graphnet.models.utils", "api/graphnet.pisa", "api/graphnet.pisa.fitting", "api/graphnet.pisa.plotting", "api/graphnet.training", "api/graphnet.training.callbacks", "api/graphnet.training.labels", "api/graphnet.training.loss_functions", "api/graphnet.training.utils", "api/graphnet.training.weight_fitting", "api/graphnet.utilities", "api/graphnet.utilities.argparse", 
"api/graphnet.utilities.config", "api/graphnet.utilities.config.base_config", "api/graphnet.utilities.config.configurable", "api/graphnet.utilities.config.dataset_config", "api/graphnet.utilities.config.model_config", "api/graphnet.utilities.config.parsing", "api/graphnet.utilities.config.training_config", "api/graphnet.utilities.decorators", "api/graphnet.utilities.filesys", "api/graphnet.utilities.imports", "api/graphnet.utilities.logging", "api/graphnet.utilities.maths", "api/modules", "contribute", "index", "install"], "filenames": ["about.md", "api/graphnet.rst", "api/graphnet.constants.rst", "api/graphnet.data.rst", "api/graphnet.data.constants.rst", "api/graphnet.data.dataconverter.rst", "api/graphnet.data.dataloader.rst", "api/graphnet.data.dataset.rst", "api/graphnet.data.dataset.dataset.rst", "api/graphnet.data.dataset.parquet.rst", "api/graphnet.data.dataset.parquet.parquet_dataset.rst", "api/graphnet.data.dataset.sqlite.rst", "api/graphnet.data.dataset.sqlite.sqlite_dataset.rst", "api/graphnet.data.extractors.rst", "api/graphnet.data.extractors.i3extractor.rst", "api/graphnet.data.extractors.i3featureextractor.rst", "api/graphnet.data.extractors.i3genericextractor.rst", "api/graphnet.data.extractors.i3hybridrecoextractor.rst", "api/graphnet.data.extractors.i3ntmuonlabelsextractor.rst", "api/graphnet.data.extractors.i3particleextractor.rst", "api/graphnet.data.extractors.i3pisaextractor.rst", "api/graphnet.data.extractors.i3quesoextractor.rst", "api/graphnet.data.extractors.i3retroextractor.rst", "api/graphnet.data.extractors.i3splinempeextractor.rst", "api/graphnet.data.extractors.i3truthextractor.rst", "api/graphnet.data.extractors.i3tumextractor.rst", "api/graphnet.data.extractors.utilities.rst", "api/graphnet.data.extractors.utilities.collections.rst", "api/graphnet.data.extractors.utilities.frames.rst", "api/graphnet.data.extractors.utilities.types.rst", "api/graphnet.data.parquet.rst", "api/graphnet.data.parquet.parquet_dataconverter.rst", "api/graphnet.data.pipeline.rst", "api/graphnet.data.sqlite.rst", "api/graphnet.data.sqlite.sqlite_dataconverter.rst", "api/graphnet.data.sqlite.sqlite_utilities.rst", "api/graphnet.data.utilities.rst", "api/graphnet.data.utilities.parquet_to_sqlite.rst", "api/graphnet.data.utilities.random.rst", "api/graphnet.data.utilities.string_selection_resolver.rst", "api/graphnet.deployment.rst", "api/graphnet.deployment.i3modules.rst", "api/graphnet.deployment.i3modules.deployer.rst", "api/graphnet.deployment.i3modules.graphnet_module.rst", "api/graphnet.models.rst", "api/graphnet.models.coarsening.rst", "api/graphnet.models.components.rst", "api/graphnet.models.components.layers.rst", "api/graphnet.models.components.pool.rst", "api/graphnet.models.detector.rst", "api/graphnet.models.detector.detector.rst", "api/graphnet.models.detector.icecube.rst", "api/graphnet.models.detector.prometheus.rst", "api/graphnet.models.gnn.rst", "api/graphnet.models.gnn.convnet.rst", "api/graphnet.models.gnn.dynedge.rst", "api/graphnet.models.gnn.dynedge_jinst.rst", "api/graphnet.models.gnn.dynedge_kaggle_tito.rst", "api/graphnet.models.gnn.gnn.rst", "api/graphnet.models.graphs.rst", "api/graphnet.models.graphs.edges.rst", "api/graphnet.models.graphs.edges.edges.rst", "api/graphnet.models.graphs.graph_definition.rst", "api/graphnet.models.graphs.graphs.rst", "api/graphnet.models.graphs.nodes.rst", "api/graphnet.models.graphs.nodes.nodes.rst", "api/graphnet.models.model.rst", "api/graphnet.models.standard_model.rst", "api/graphnet.models.task.rst", 
"api/graphnet.models.task.classification.rst", "api/graphnet.models.task.reconstruction.rst", "api/graphnet.models.task.task.rst", "api/graphnet.models.utils.rst", "api/graphnet.pisa.rst", "api/graphnet.pisa.fitting.rst", "api/graphnet.pisa.plotting.rst", "api/graphnet.training.rst", "api/graphnet.training.callbacks.rst", "api/graphnet.training.labels.rst", "api/graphnet.training.loss_functions.rst", "api/graphnet.training.utils.rst", "api/graphnet.training.weight_fitting.rst", "api/graphnet.utilities.rst", "api/graphnet.utilities.argparse.rst", "api/graphnet.utilities.config.rst", "api/graphnet.utilities.config.base_config.rst", "api/graphnet.utilities.config.configurable.rst", "api/graphnet.utilities.config.dataset_config.rst", "api/graphnet.utilities.config.model_config.rst", "api/graphnet.utilities.config.parsing.rst", "api/graphnet.utilities.config.training_config.rst", "api/graphnet.utilities.decorators.rst", "api/graphnet.utilities.filesys.rst", "api/graphnet.utilities.imports.rst", "api/graphnet.utilities.logging.rst", "api/graphnet.utilities.maths.rst", "api/modules.rst", "contribute.md", "index.rst", "install.md"], "titles": ["About", "API", "constants", "data", "constants", "dataconverter", "dataloader", "dataset", "dataset", "parquet", "parquet_dataset", "sqlite", "sqlite_dataset", "extractors", "i3extractor", "i3featureextractor", "i3genericextractor", "i3hybridrecoextractor", "i3ntmuonlabelsextractor", "i3particleextractor", "i3pisaextractor", "i3quesoextractor", "i3retroextractor", "i3splinempeextractor", "i3truthextractor", "i3tumextractor", "utilities", "collections", "frames", "types", "parquet", "parquet_dataconverter", "pipeline", "sqlite", "sqlite_dataconverter", "sqlite_utilities", "utilities", "parquet_to_sqlite", "random", "string_selection_resolver", "deployment", "i3modules", "deployer", "graphnet_module", "models", "coarsening", "components", "layers", "pool", "detector", "detector", "icecube", "prometheus", "gnn", "convnet", "dynedge", "dynedge_jinst", "dynedge_kaggle_tito", "gnn", "graphs", "edges", "edges", "graph_definition", "graphs", "nodes", "nodes", "model", "standard_model", "task", "classification", "reconstruction", "task", "utils", "pisa", "fitting", "plotting", "training", "callbacks", "labels", "loss_functions", "utils", "weight_fitting", "utilities", "argparse", "config", "base_config", "configurable", "dataset_config", "model_config", "parsing", "training_config", "decorators", "filesys", "imports", "logging", "maths", "src", "Contribute", "About", "Install"], "terms": {"graphnet": [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32, 34, 35, 36, 37, 38, 39, 40, 43, 44, 45, 47, 48, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 72, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 97, 98, 99], "i": [0, 1, 8, 10, 12, 14, 16, 27, 28, 29, 34, 35, 38, 39, 43, 45, 48, 54, 55, 61, 65, 69, 70, 71, 72, 75, 77, 78, 79, 81, 83, 88, 89, 92, 93, 94, 97, 98, 99], "an": [0, 5, 29, 31, 32, 34, 39, 43, 62, 79, 92, 94, 97, 98, 99], "open": [0, 97, 98], "sourc": [0, 4, 5, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 34, 35, 37, 38, 39, 43, 45, 47, 48, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 72, 74, 75, 77, 78, 79, 80, 81, 83, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 97, 98], "python": [0, 1, 5, 13, 14, 16, 27, 29, 97, 98, 99], "framework": [0, 98], "aim": [0, 1, 97, 98], "provid": [0, 1, 8, 10, 
12, 43, 44, 79, 97, 98, 99], "high": [0, 98], "qualiti": [0, 98], "user": [0, 44, 77, 98, 99], "friendli": [0, 98], "end": [0, 1, 5, 31, 34, 98], "function": [0, 5, 6, 8, 29, 35, 38, 43, 45, 48, 51, 52, 62, 66, 69, 70, 71, 72, 74, 75, 79, 80, 82, 87, 88, 89, 92, 93, 95, 98], "perform": [0, 45, 47, 48, 53, 55, 57, 67, 69, 70, 71, 98], "reconstruct": [0, 1, 15, 17, 18, 22, 23, 25, 32, 40, 44, 57, 68, 71, 98], "task": [0, 1, 44, 67, 69, 70, 79, 97, 98], "neutrino": [0, 1, 47, 57, 74, 98], "telescop": [0, 1, 98], "us": [0, 1, 2, 4, 5, 6, 8, 9, 10, 11, 12, 14, 19, 24, 26, 27, 31, 32, 34, 35, 36, 37, 39, 40, 43, 44, 47, 48, 50, 55, 56, 57, 61, 62, 63, 66, 68, 69, 70, 71, 72, 74, 77, 78, 79, 81, 82, 83, 84, 85, 87, 88, 89, 90, 93, 94, 97, 98, 99], "graph": [0, 1, 6, 8, 10, 12, 43, 44, 47, 48, 50, 60, 61, 62, 64, 65, 72, 78, 80, 97, 98], "neural": [0, 1, 98], "network": [0, 1, 54, 98], "gnn": [0, 1, 32, 44, 54, 55, 56, 57, 62, 67, 98, 99], "make": [0, 5, 81, 87, 88, 97, 98, 99], "fast": [0, 98, 99], "easi": [0, 98], "train": [0, 1, 7, 39, 40, 43, 62, 67, 77, 78, 79, 80, 81, 83, 87, 88, 90, 96, 98, 99], "complex": [0, 44, 98], "model": [0, 1, 40, 43, 45, 46, 47, 48, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 67, 68, 69, 70, 71, 72, 75, 76, 77, 79, 80, 83, 85, 87, 88, 90, 96, 98, 99], "can": [0, 1, 8, 10, 12, 14, 16, 19, 37, 43, 48, 62, 74, 75, 81, 83, 85, 87, 88, 97, 98, 99], "event": [0, 1, 8, 10, 12, 21, 35, 37, 39, 43, 48, 62, 69, 70, 71, 72, 74, 79, 81, 87, 98], "state": [0, 98], "art": [0, 98], "arbitrari": [0, 98], "detector": [0, 1, 24, 44, 51, 52, 62, 63, 65, 67, 98], "configur": [0, 1, 8, 44, 66, 67, 74, 82, 84, 85, 87, 88, 90, 94, 98], "infer": [0, 1, 32, 40, 43, 67, 69, 70, 71, 98, 99], "time": [0, 4, 35, 45, 48, 70, 94, 98, 99], "ar": [0, 1, 4, 5, 8, 10, 12, 16, 29, 31, 34, 37, 39, 43, 48, 55, 57, 59, 60, 61, 62, 63, 64, 69, 74, 79, 81, 87, 88, 97, 98, 99], "order": [0, 27, 45, 72, 98], "magnitud": [0, 98], "faster": [0, 98], "than": [0, 6, 69, 70, 71, 80, 94, 98], "tradit": [0, 98], "techniqu": [0, 98], "common": [0, 1, 79, 85, 87, 88, 90, 91, 93, 98], "ml": [0, 1, 98], "develop": [0, 1, 97, 98, 99], "physicist": [0, 1, 98], "wish": [0, 97, 98], "tool": [0, 1, 98], "research": [0, 98], "By": [0, 37, 69, 70, 71, 98], "unit": [0, 5, 93, 97, 98], "both": [0, 16, 69, 70, 71, 75, 98], "group": [0, 5, 31, 34, 48, 98], "increas": [0, 77, 98], "longev": [0, 98], "usabl": [0, 98], "individu": [0, 5, 8, 10, 12, 48, 55, 72, 98], "code": [0, 24, 35, 62, 87, 88, 98], "contribut": [0, 98, 99], "from": [0, 1, 6, 8, 10, 12, 13, 14, 16, 18, 19, 21, 27, 28, 29, 32, 34, 37, 43, 48, 57, 61, 62, 65, 66, 69, 70, 71, 72, 75, 77, 78, 79, 85, 86, 87, 88, 90, 94, 97, 98, 99], "build": [0, 1, 44, 50, 61, 65, 66, 85, 87, 88, 98], "gener": [0, 5, 8, 10, 12, 16, 43, 59, 60, 62, 63, 64, 69, 79, 98], "reusabl": [0, 98], "softwar": [0, 79, 98], "packag": [0, 1, 38, 89, 92, 93, 97, 98, 99], "base": [0, 4, 5, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 31, 32, 34, 37, 39, 43, 45, 47, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 74, 77, 78, 79, 81, 83, 85, 86, 87, 88, 90, 93, 94, 98], "engin": [0, 98], "best": [0, 97, 98], "practic": [0, 97, 98], "lower": [0, 75, 98], "technic": [0, 98], "threshold": [0, 43, 98], "most": [0, 1, 39, 98, 99], "scientif": [0, 1, 98], "problem": [0, 61, 97, 98], "The": [0, 5, 8, 10, 12, 27, 29, 32, 34, 35, 43, 45, 47, 48, 55, 57, 61, 62, 69, 70, 71, 72, 74, 75, 77, 78, 79, 98], "improv": [0, 1, 83, 98], "classif": [0, 1, 44, 68, 71, 79, 98], 
"yield": [0, 55, 74, 79, 98], "veri": [0, 39, 98], "accur": [0, 98], "e": [0, 1, 5, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 27, 29, 31, 32, 34, 35, 39, 43, 45, 47, 48, 50, 51, 52, 54, 58, 61, 62, 65, 66, 67, 69, 70, 71, 72, 77, 78, 79, 81, 85, 94, 97, 98, 99], "g": [0, 1, 5, 8, 10, 12, 24, 27, 29, 31, 32, 34, 35, 39, 43, 48, 62, 69, 70, 71, 72, 81, 94, 97, 98, 99], "low": [0, 98], "energi": [0, 4, 32, 69, 70, 71, 81, 98], "observ": [0, 98], "icecub": [0, 1, 15, 28, 29, 44, 47, 49, 57, 93, 98, 99], "here": [0, 97, 98, 99], "implement": [0, 1, 5, 14, 30, 31, 33, 34, 47, 54, 55, 56, 57, 61, 79, 97, 98], "wa": [0, 98], "appli": [0, 8, 10, 12, 14, 48, 54, 55, 56, 57, 58, 67, 89, 98], "oscil": [0, 73, 98], "lead": [0, 98], "signific": [0, 98], "angular": [0, 98], "rang": [0, 69, 70, 71, 98], "relev": [0, 1, 29, 38, 92, 97, 98], "studi": [0, 98], "furthermor": [0, 98], "shown": [0, 98], "could": [0, 97, 98], "muon": [0, 18, 98], "v": [0, 98], "therebi": [0, 1, 87, 88, 98], "effici": [0, 98], "puriti": [0, 98], "sampl": [0, 39, 62, 63, 98], "analysi": [0, 32, 98, 99], "similarli": [0, 29, 98], "ha": [0, 5, 29, 31, 34, 35, 43, 54, 79, 92, 98, 99], "great": [0, 98], "point": [0, 23, 78, 79, 98], "analys": [0, 40, 73, 98], "final": [0, 48, 77, 87, 98], "millisecond": [0, 98], "allow": [0, 40, 44, 48, 77, 85, 90, 98, 99], "whole": [0, 98], "new": [0, 1, 34, 47, 85, 90, 97, 98], "type": [0, 5, 6, 8, 10, 12, 13, 14, 26, 27, 28, 31, 34, 35, 37, 38, 39, 45, 47, 48, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 72, 74, 75, 77, 79, 80, 81, 83, 85, 86, 87, 88, 89, 92, 93, 94, 95, 97, 98], "cosmic": [0, 98], "alert": [0, 98], "which": [0, 8, 10, 12, 14, 15, 24, 28, 32, 39, 43, 45, 48, 55, 62, 63, 66, 69, 74, 79, 83, 98, 99], "were": [0, 98], "previous": [0, 98], "unfeas": [0, 98], "possibl": [0, 27, 97, 98], "identifi": [0, 5, 8, 10, 12, 24, 87, 88, 98], "10": [0, 32, 83, 98], "tev": [0, 98], "monitor": [0, 98], "rate": [0, 77, 98], "direct": [0, 57, 69, 70, 71, 76, 78, 98], "real": [0, 98], "thi": [0, 3, 5, 8, 10, 12, 14, 16, 29, 31, 34, 35, 38, 43, 44, 48, 55, 62, 63, 65, 67, 69, 70, 71, 72, 74, 75, 77, 79, 81, 85, 87, 88, 90, 94, 97, 98, 99], "enabl": [0, 3, 98], "first": [0, 77, 85, 90, 97, 98], "ever": [0, 98], "despit": [0, 98], "larg": [0, 79, 98], "background": [0, 98], "origin": [0, 74, 98], "compris": [0, 98], "number": [0, 5, 8, 10, 12, 31, 32, 34, 39, 47, 48, 54, 55, 56, 57, 58, 61, 63, 65, 69, 70, 71, 77, 83, 98], "modul": [0, 3, 8, 29, 32, 40, 43, 44, 47, 49, 53, 59, 60, 62, 63, 64, 66, 68, 73, 76, 82, 84, 87, 88, 89, 90, 93, 98], "necessari": [0, 27, 97, 98], "workflow": [0, 98], "ingest": [0, 1, 3, 49, 98], "raw": [0, 65, 98], "data": [0, 1, 4, 5, 6, 8, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 45, 47, 48, 49, 50, 51, 54, 55, 56, 57, 58, 61, 62, 63, 66, 67, 69, 70, 71, 72, 78, 80, 83, 85, 87, 90, 93, 96, 98, 99], "domain": [0, 1, 3, 40, 98], "specif": [0, 1, 3, 5, 8, 10, 12, 15, 29, 30, 31, 33, 34, 35, 40, 45, 48, 49, 50, 51, 52, 53, 58, 61, 62, 65, 67, 68, 69, 70, 71, 79, 97, 98, 99], "format": [0, 1, 3, 5, 8, 27, 31, 34, 75, 87, 97, 98, 99], "deploi": [0, 1, 40, 43, 98], "chain": [0, 1, 40, 44, 67, 98, 99], "illustr": [0, 97, 98], "figur": [0, 75, 98], "level": [0, 8, 10, 12, 24, 35, 45, 48, 94, 98, 99], "overview": [0, 98], "typic": [0, 27, 98], "convert": [0, 1, 3, 5, 27, 31, 34, 37, 98, 99], "industri": [0, 3, 98], "standard": [0, 3, 4, 5, 31, 34, 39, 51, 52, 62, 63, 65, 67, 
83, 97, 98], "intermedi": [0, 1, 3, 5, 8, 31, 34, 54, 98, 99], "file": [0, 1, 3, 5, 8, 10, 12, 14, 27, 31, 34, 37, 38, 43, 62, 66, 74, 77, 79, 83, 84, 85, 86, 87, 88, 92, 94, 98, 99], "read": [0, 3, 8, 10, 12, 27, 50, 55, 67, 68, 98, 99], "simpl": [0, 44, 98], "physic": [0, 1, 14, 28, 29, 40, 43, 44, 68, 69, 70, 71, 98], "orient": [0, 44, 98], "compon": [0, 1, 44, 47, 48, 67, 98], "manag": [0, 14, 76, 98], "experi": [0, 1, 76, 98], "log": [0, 1, 70, 76, 77, 79, 82, 98, 99], "deploy": [0, 1, 41, 43, 62, 96, 98], "modular": [0, 44, 98], "subclass": [0, 44, 98], "torch": [0, 8, 10, 12, 44, 47, 62, 63, 66, 93, 98, 99], "nn": [0, 44, 47, 61, 63, 98], "mean": [0, 5, 8, 10, 12, 31, 34, 44, 55, 57, 79, 88, 98], "onli": [0, 1, 8, 10, 12, 44, 48, 69, 70, 71, 74, 81, 88, 93, 98, 99], "need": [0, 27, 44, 66, 79, 98, 99], "import": [0, 1, 35, 44, 82, 98], "few": [0, 44, 97, 98], "exist": [0, 8, 10, 12, 32, 34, 35, 44, 78, 87, 98], "purpos": [0, 44, 79, 98], "built": [0, 44, 98], "them": [0, 1, 27, 44, 55, 69, 70, 71, 74, 98, 99], "togeth": [0, 44, 61, 67, 98], "form": [0, 44, 69, 85, 90, 98], "complet": [0, 44, 67, 98], "extend": [0, 1, 98], "suit": [0, 98], "through": [0, 79, 98], "layer": [0, 44, 46, 48, 54, 55, 56, 57, 69, 70, 71, 98], "connect": [0, 61, 62, 65, 79, 98], "etc": [0, 79, 94, 98], "optimis": [0, 1, 98], "differ": [0, 8, 10, 12, 14, 63, 67, 97, 98, 99], "track": [0, 14, 18, 70, 97, 98], "These": [0, 62, 97, 98], "prepar": [0, 79, 98], "satisfi": [0, 98], "o": [0, 69, 70, 71, 98], "load": [0, 6, 8, 38, 66, 85, 87, 98], "requir": [0, 20, 35, 69, 79, 87, 88, 90, 98, 99], "when": [0, 5, 8, 10, 12, 27, 31, 34, 35, 43, 47, 55, 57, 78, 94, 97, 98, 99], "batch": [0, 6, 32, 45, 47, 48, 67, 72, 80, 83, 98], "do": [0, 43, 79, 87, 88, 97, 98, 99], "predict": [0, 19, 23, 25, 32, 43, 54, 66, 67, 69, 70, 71, 79, 80, 98], "either": [0, 8, 10, 12, 79, 98, 99], "contain": [0, 5, 8, 10, 12, 27, 28, 31, 32, 34, 43, 55, 59, 60, 62, 63, 64, 66, 69, 70, 71, 79, 81, 83, 98, 99], "imag": [0, 1, 97, 98, 99], "portabl": [0, 98], "depend": [0, 98, 99], "free": [0, 79, 98], "split": [0, 45, 98], "up": [0, 5, 31, 34, 43, 97, 98, 99], "interfac": [0, 73, 87, 88, 98, 99], "block": [0, 1, 98], "pre": [0, 50, 62, 78, 97, 98], "directli": [0, 14, 98], "while": [0, 16, 77, 98], "continu": [0, 79, 98], "expand": [0, 98], "": [0, 5, 6, 8, 10, 12, 14, 27, 34, 37, 54, 55, 67, 69, 70, 71, 72, 77, 81, 83, 87, 88, 94, 95, 98, 99], "capabl": [0, 98], "project": [0, 97, 98], "receiv": [0, 98], "fund": [0, 98], "european": [0, 98], "union": [0, 6, 8, 10, 12, 16, 27, 29, 43, 45, 47, 48, 55, 62, 63, 66, 67, 69, 70, 71, 87, 90, 92, 98], "horizon": [0, 98], "2020": [0, 98], "innov": [0, 98], "programm": [0, 98], "under": [0, 98], "mari": [0, 98], "sk\u0142odowska": [0, 98], "curi": [0, 98], "grant": [0, 79, 98], "agreement": [0, 97, 98], "No": [0, 98], "890778": [0, 98], "work": [0, 4, 28, 97, 98, 99], "rasmu": [0, 56, 98], "\u00f8rs\u00f8e": [0, 98], "partli": [0, 98], "punch4nfdi": [0, 98], "consortium": [0, 98], "support": [0, 29, 97, 98, 99], "dfg": [0, 98], "nfdi": [0, 98], "39": [0, 98, 99], "1": [0, 5, 8, 27, 31, 34, 39, 45, 48, 55, 57, 61, 63, 69, 70, 71, 72, 77, 79, 81, 87, 98, 99], "germani": [0, 98], "conveni": [1, 97, 99], "collabor": 1, "solv": [1, 97], "It": [1, 27, 35, 43, 97], "leverag": 1, "advanc": [1, 48], "machin": [1, 99], "learn": [1, 43, 77, 99], "without": [1, 61, 65, 74, 79, 99], "have": [1, 5, 16, 31, 34, 35, 39, 48, 62, 69, 70, 71, 97, 99], "expert": 1, "themselv": [1, 87, 88], "acceler": 1, "area": 1, 
"phyic": 1, "design": 1, "principl": 1, "all": [1, 5, 8, 10, 12, 14, 16, 31, 34, 35, 43, 47, 48, 50, 55, 58, 62, 66, 71, 79, 85, 86, 87, 88, 89, 90, 94, 97, 99], "streamlin": 1, "process": [1, 5, 14, 43, 50, 55, 97, 99], "transform": [1, 48, 69, 70, 71, 81], "extens": [1, 92], "basic": 1, "across": [1, 2, 8, 10, 12, 29, 36, 48, 67, 79, 82, 83, 84, 94], "variou": 1, "easili": 1, "architectur": [1, 54, 55, 56, 57, 67], "main": [1, 53, 62, 67, 97, 99], "featur": [1, 3, 4, 5, 8, 10, 12, 15, 32, 43, 47, 48, 50, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 69, 72, 80, 87, 97], "i3": [1, 5, 14, 28, 29, 31, 34, 38, 43, 92, 99], "more": [1, 8, 35, 38, 85, 87, 88, 90, 94], "index": [1, 5, 8, 10, 12, 29, 35, 48, 77], "sqlite": [1, 3, 7, 12, 32, 34, 35, 37, 99], "suitabl": 1, "plug": 1, "plai": 1, "abstract": [1, 5, 8, 50, 58, 62, 66, 71, 86], "awai": 1, "detail": [1, 99], "expos": 1, "physicst": 1, "what": [1, 62, 97], "i3modul": [1, 40, 43], "includ": [1, 66, 67, 74, 79, 85, 97], "docker": 1, "run": [1, 37], "containeris": 1, "fashion": 1, "subpackag": [1, 3, 7, 13, 40, 44, 59, 82], "dataset": [1, 3, 6, 9, 10, 11, 12, 18, 39, 62, 83, 87], "extractor": [1, 3, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34, 43], "parquet": [1, 3, 7, 10, 31, 37, 99], "util": [1, 3, 13, 27, 28, 29, 35, 37, 38, 39, 44, 76, 83, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96], "constant": [1, 3, 96], "dataconvert": [1, 3, 31, 34], "dataload": [1, 3, 32, 62, 66, 67, 80, 90], "pipelin": [1, 3], "coarsen": [1, 44, 48], "standard_model": [1, 44], "pisa": [1, 20, 32, 74, 75, 93, 96, 99], "fit": [1, 66, 73, 75, 79, 81, 90], "plot": [1, 73], "callback": [1, 66, 76], "label": [1, 8, 18, 21, 54, 62, 67, 71, 75, 76, 80], "loss_funct": [1, 69, 70, 71, 76], "weight_fit": [1, 76], "config": [1, 6, 39, 74, 79, 82, 83, 85, 86, 87, 88, 89, 90], "argpars": [1, 82], "decor": [1, 5, 82, 93], "filesi": [1, 82], "math": [1, 82], "submodul": [1, 3, 7, 9, 11, 13, 26, 30, 33, 36, 41, 44, 46, 49, 53, 59, 60, 64, 68, 73, 76, 82, 84, 89], "global": [2, 4, 55, 57, 66], "i3extractor": [3, 5, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 31, 34], "i3featureextractor": [3, 4, 13, 34, 43], "i3genericextractor": [3, 13, 34], "i3hybridrecoextractor": [3, 13], "i3ntmuonlabelsextractor": [3, 13], "i3particleextractor": [3, 13], "i3pisaextractor": [3, 13], "i3quesoextractor": [3, 13], "i3retroextractor": [3, 13], "i3splinempeextractor": [3, 13], "i3truthextractor": [3, 4, 13], "i3tumextractor": [3, 13], "parquet_dataconvert": [3, 30], "sqlite_dataconvert": [3, 33], "sqlite_util": [3, 33], "parquet_to_sqlit": [3, 36], "random": [3, 8, 10, 12, 36, 39, 87], "string_selection_resolv": [3, 36], "truth": [3, 4, 8, 10, 12, 15, 24, 32, 35, 62, 80, 81, 87], "fileset": [3, 5], "init_global_index": [3, 5], "cache_output_fil": [3, 5], "collate_fn": [3, 6, 76, 80], "do_shuffl": [3, 6], "insqlitepipelin": [3, 32], "class": [4, 5, 6, 7, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 29, 30, 31, 32, 33, 34, 37, 39, 43, 45, 47, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 74, 77, 78, 79, 81, 83, 85, 86, 87, 88, 89, 90, 94, 97], "object": [4, 5, 8, 10, 12, 14, 16, 27, 29, 43, 45, 48, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 74, 79, 83, 94], "namespac": [4, 66, 87, 88], "name": [4, 5, 6, 8, 10, 12, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31, 32, 34, 35, 37, 43, 62, 63, 65, 69, 70, 71, 74, 78, 81, 83, 85, 87, 88, 89, 90, 94, 97, 99], "icecube86": [4, 49, 51], "dom_x": [4, 43], 
"dom_i": [4, 43], "dom_z": [4, 43], "dom_tim": 4, "charg": [4, 43, 79], "rde": 4, "pmt_area": 4, "deepcor": [4, 15, 51], "upgrad": [4, 15, 51, 99], "string": [4, 5, 8, 10, 12, 27, 31, 34, 39, 48, 85], "pmt_number": 4, "dom_numb": 4, "pmt_dir_x": 4, "pmt_dir_i": 4, "pmt_dir_z": 4, "dom_typ": 4, "prometheu": [4, 44, 49], "sensor_pos_x": 4, "sensor_pos_i": 4, "sensor_pos_z": 4, "t": [4, 29, 35, 75, 77, 79, 99], "kaggl": [4, 47, 51, 57], "x": [4, 5, 24, 31, 34, 47, 48, 65, 66, 71, 72, 75, 79, 81], "y": [4, 24, 72, 75, 99], "z": [4, 5, 24, 31, 34, 72, 99], "auxiliari": 4, "energy_track": 4, "position_x": 4, "position_i": 4, "position_z": 4, "azimuth": [4, 70, 78], "zenith": [4, 70, 78], "pid": [4, 39, 87], "elast": 4, "sim_typ": 4, "interaction_typ": 4, "interaction_tim": [4, 70], "inelast": [4, 70], "stopped_muon": 4, "injection_energi": 4, "injection_typ": 4, "injection_interaction_typ": 4, "injection_zenith": 4, "injection_azimuth": 4, "injection_bjorkenx": 4, "injection_bjorkeni": 4, "injection_position_x": 4, "injection_position_i": 4, "injection_position_z": 4, "injection_column_depth": 4, "primary_lepton_1_typ": 4, "primary_hadron_1_typ": 4, "primary_lepton_1_position_x": 4, "primary_lepton_1_position_i": 4, "primary_lepton_1_position_z": 4, "primary_hadron_1_position_x": 4, "primary_hadron_1_position_i": 4, "primary_hadron_1_position_z": 4, "primary_lepton_1_direction_theta": 4, "primary_lepton_1_direction_phi": 4, "primary_hadron_1_direction_theta": 4, "primary_hadron_1_direction_phi": 4, "primary_lepton_1_energi": 4, "primary_hadron_1_energi": 4, "total_energi": 4, "i3_fil": [5, 14], "str": [5, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 34, 35, 37, 38, 39, 43, 47, 48, 50, 51, 52, 55, 57, 62, 63, 65, 66, 67, 69, 70, 71, 74, 78, 80, 81, 83, 85, 86, 87, 88, 89, 90, 92, 94], "gcd_file": [5, 14, 43], "paramet": [5, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 34, 35, 37, 38, 39, 43, 45, 47, 48, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 72, 74, 75, 77, 78, 79, 80, 81, 83, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95], "output_fil": [5, 31, 34], "global_index": 5, "avail": [5, 16, 32, 93], "pool": [5, 44, 45, 46, 55, 57], "worker": [5, 31, 32, 34, 38, 83, 94], "return": [5, 6, 8, 10, 12, 14, 27, 28, 29, 31, 34, 35, 37, 38, 39, 45, 47, 48, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 72, 74, 75, 77, 78, 79, 80, 81, 83, 85, 86, 87, 88, 89, 92, 93, 94, 95], "none": [5, 6, 8, 10, 12, 14, 16, 24, 28, 29, 31, 32, 34, 35, 37, 39, 43, 47, 48, 55, 57, 62, 63, 65, 66, 67, 69, 70, 71, 74, 77, 79, 80, 81, 83, 85, 86, 87, 89, 92, 94], "synchron": 5, "list": [5, 6, 8, 10, 12, 14, 16, 24, 27, 29, 31, 32, 34, 35, 37, 38, 39, 43, 45, 47, 48, 50, 55, 57, 61, 62, 63, 65, 66, 67, 69, 70, 71, 72, 75, 77, 80, 81, 87, 89, 90, 92, 94], "process_method": 5, "cach": 5, "output": [5, 31, 34, 37, 54, 55, 56, 58, 65, 66, 67, 74, 81, 87, 88, 99], "typevar": 5, "f": [5, 48], "bound": [5, 75], "callabl": [5, 6, 8, 29, 47, 48, 50, 51, 52, 62, 69, 70, 71, 80, 81, 85, 87, 88, 89, 93], "ani": [5, 6, 8, 10, 12, 27, 28, 29, 31, 34, 43, 45, 47, 48, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 75, 79, 81, 83, 85, 86, 87, 88, 89, 90, 94, 99], "outdir": [5, 31, 32, 34, 37, 74], "gcd_rescu": [5, 31, 34, 92], "nb_files_to_batch": [5, 31, 34], "sequential_batch_pattern": [5, 31, 34], "input_file_batch_pattern": [5, 31, 34], "index_column": [5, 8, 10, 12, 31, 34, 35, 39, 74, 80, 81, 87], 
"icetray_verbos": [5, 31, 34], "abc": [5, 8, 14, 32, 66, 78, 81, 86, 87, 88], "logger": [5, 8, 14, 32, 37, 39, 61, 66, 78, 81, 82, 94, 99], "construct": [5, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 31, 34, 37, 39, 45, 46, 47, 50, 51, 52, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 69, 70, 71, 74, 77, 78, 79, 80, 81, 83, 86, 87, 88, 94], "regular": [5, 29, 31, 34], "express": [5, 31, 34, 66, 79], "accord": [5, 31, 34, 45, 48, 61, 62, 63], "match": [5, 31, 34, 81, 92, 95], "certain": [5, 31, 34, 37, 74], "pattern": [5, 31, 34], "wildcard": [5, 31, 34], "same": [5, 29, 31, 34, 35, 45, 48, 69, 72, 77, 89, 94], "input": [5, 8, 10, 12, 31, 32, 34, 43, 51, 54, 55, 56, 57, 58, 62, 65, 69, 71, 72, 85, 90], "replac": [5, 31, 34, 85, 87, 88, 90], "period": [5, 31, 34], "special": [5, 16, 31, 34, 43, 72], "interpret": [5, 31, 34, 69], "liter": [5, 31, 34], "charact": [5, 31, 34], "regex": [5, 31, 34], "For": [5, 29, 31, 34, 77], "instanc": [5, 8, 14, 24, 29, 31, 34, 43, 62, 66, 74, 78, 80, 86, 88, 99], "A": [5, 8, 31, 32, 34, 43, 48, 63, 72, 74, 79, 81, 99], "_": [5, 31, 34], "0": [5, 8, 10, 12, 31, 34, 39, 43, 45, 48, 54, 55, 57, 61, 63, 72, 74, 75, 79, 87], "9": [5, 31, 34], "5": [5, 8, 10, 12, 31, 34, 39, 83, 99], "zst": [5, 31, 34], "find": [5, 31, 34, 92], "whose": [5, 31, 34, 43], "one": [5, 8, 31, 34, 35, 43, 48, 66, 87, 88, 92, 97, 99], "capit": [5, 31, 34], "letter": [5, 31, 34], "follow": [5, 31, 34, 55, 67, 79, 81, 97, 99], "underscor": [5, 31, 34], "five": [5, 31, 34], "upgrade_genie_step4_141020_a_000000": [5, 31, 34], "upgrade_genie_step4_141020_a_000001": [5, 31, 34], "upgrade_genie_step4_141020_a_000008": [5, 31, 34], "upgrade_genie_step4_141020_a_000009": [5, 31, 34], "would": [5, 31, 34, 97], "upgrade_genie_step4_141020_a_00000x": [5, 31, 34], "suffix": [5, 31, 34], "upgrade_genie_step4_141020_a_000010": [5, 31, 34], "separ": [5, 27, 31, 34, 77, 99], "upgrade_genie_step4_141020_a_00001x": [5, 31, 34], "int": [5, 6, 8, 10, 12, 18, 21, 31, 32, 34, 39, 47, 48, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 72, 74, 77, 79, 80, 81, 83, 87, 90, 94], "properti": [5, 8, 14, 19, 29, 48, 58, 65, 67, 71, 78, 86, 94], "file_suffix": [5, 31, 34], "execut": [5, 35], "method": [5, 8, 10, 12, 14, 26, 27, 28, 29, 31, 34, 43, 47, 48, 70, 79, 81], "set": [5, 16, 69, 70, 71, 97], "inherit": [5, 14, 29, 50, 65, 79, 94], "path": [5, 8, 10, 12, 35, 38, 43, 62, 66, 74, 75, 83, 85, 86, 87, 92, 99], "correspond": [5, 8, 10, 12, 27, 29, 34, 38, 55, 62, 81, 92, 99], "gcd": [5, 14, 28, 38, 43, 92], "save_data": [5, 31, 34], "save": [5, 14, 27, 31, 34, 35, 66, 74, 79, 80, 81, 85, 86, 87, 88, 99], "ordereddict": [5, 31, 34], "extract": [5, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 28, 34, 37, 38, 43, 69, 70, 71], "merge_fil": [5, 31, 34], "input_fil": [5, 31, 34], "merg": [5, 31, 34, 79, 99], "result": [5, 31, 34, 48, 77, 79, 80, 89, 99], "option": [5, 8, 10, 12, 24, 31, 32, 34, 43, 47, 48, 55, 57, 62, 63, 66, 69, 70, 71, 74, 75, 81, 82, 83, 85, 87, 92, 99], "default": [5, 8, 10, 12, 16, 24, 27, 31, 32, 34, 35, 37, 43, 47, 48, 54, 55, 56, 57, 61, 62, 63, 65, 66, 69, 70, 71, 74, 75, 77, 78, 79, 81, 83, 85, 87, 92], "current": [5, 31, 34, 39, 77, 97, 99], "rais": [5, 8, 16, 31, 66, 85, 90], "notimplementederror": [5, 31], "If": [5, 8, 16, 31, 32, 34, 66, 69, 70, 71, 74, 77, 81, 97, 99], "been": [5, 31, 43, 79, 97], "backend": [5, 9, 11, 31, 34], "question": 5, "get_map_funct": 5, "nb_file": 5, "map": [5, 8, 10, 12, 15, 16, 34, 35, 43, 51, 52, 62, 63, 85, 87, 88, 
90], "pure": [5, 13, 14, 16, 29], "multiprocess": [5, 99], "tupl": [5, 8, 10, 12, 28, 29, 47, 55, 57, 69, 70, 71, 72, 74, 75, 80, 83], "remov": [6, 80, 83], "less": [6, 80], "two": [6, 55, 74, 77, 79, 80], "dom": [6, 8, 10, 12, 45, 48, 80], "hit": [6, 80], "should": [6, 8, 10, 12, 14, 27, 39, 47, 48, 62, 63, 79, 80, 85, 87, 88, 90, 97, 99], "occur": [6, 80], "product": [6, 80], "selection_nam": 6, "check": [6, 28, 29, 34, 35, 83, 92, 93, 97, 99], "whether": [6, 28, 29, 34, 35, 55, 66, 79, 89, 92, 93], "shuffl": [6, 38, 80], "select": [6, 8, 10, 12, 21, 39, 80, 81, 87, 97], "bool": [6, 28, 29, 34, 35, 39, 43, 55, 66, 67, 74, 77, 79, 80, 81, 83, 89, 92, 93, 94], "batch_siz": [6, 32, 72, 80], "num_work": [6, 80], "persistent_work": [6, 80], "prefetch_factor": 6, "kwarg": [6, 8, 10, 12, 45, 47, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 79, 81, 85, 87, 88, 94], "t_co": 6, "classmethod": [6, 8, 66, 79, 85, 86], "from_dataset_config": 6, "datasetconfig": [6, 8, 39, 84, 87], "dict": [6, 8, 16, 27, 29, 32, 34, 50, 51, 52, 62, 63, 66, 67, 74, 75, 77, 80, 83, 85, 87, 88, 89, 90], "parquet_dataset": [7, 9], "sqlite_dataset": [7, 11], "columnmissingexcept": [7, 8], "load_modul": [7, 8, 66], "parse_graph_definit": [7, 8], "ensembledataset": [7, 8, 87], "except": 8, "indic": [8, 39, 48, 77, 83, 97], "miss": 8, "column": [8, 10, 12, 35, 43, 61, 62, 63, 65, 66, 67, 69, 70, 71, 72, 74, 81], "class_nam": [8, 88, 94], "cfg": 8, "graphdefinit": [8, 10, 12, 43, 59, 60, 62, 63, 64, 80, 97], "arg": [8, 10, 12, 45, 50, 51, 52, 54, 55, 56, 57, 58, 61, 62, 63, 65, 66, 67, 69, 70, 71, 79, 83, 85, 90, 94], "pulsemap": [8, 10, 12, 15, 34, 43, 80, 87], "puls": [8, 10, 12, 15, 16, 28, 29, 34, 35, 43, 45, 48, 65, 72], "seri": [8, 10, 12, 15, 16, 28, 29, 35, 43], "node": [8, 10, 12, 44, 45, 48, 54, 55, 57, 59, 60, 61, 62, 63, 69, 70, 71, 72], "multipl": [8, 10, 12, 14, 77, 87, 94], "store": [8, 10, 12, 14, 32, 35, 74, 78], "ad": [8, 10, 12, 15, 55, 62, 74], "attribut": [8, 10, 12, 45, 69, 70, 71], "node_truth": [8, 10, 12, 80, 87], "event_no": [8, 10, 12, 35, 39, 81, 87], "uniqu": [8, 10, 12, 35, 37, 87], "indici": [8, 10, 12, 28, 39, 79], "tabl": [8, 10, 12, 14, 32, 34, 35, 62, 74, 81], "truth_tabl": [8, 10, 12, 74, 80, 81, 87], "inform": [8, 10, 12, 14, 16, 24, 75], "node_truth_t": [8, 10, 12, 80, 87], "string_select": [8, 10, 12, 80, 87], "subset": [8, 10, 12, 47, 55, 57], "given": [8, 10, 12, 34, 48, 61, 69, 70, 71, 81, 83], "queri": [8, 10, 12, 35, 39], "pass": [8, 10, 12, 47, 54, 55, 56, 57, 58, 62, 66, 67, 69, 70, 71, 79, 81, 97], "dtype": [8, 10, 12, 62, 63, 95], "float32": [8, 10, 12, 62, 63], "tensor": [8, 10, 12, 45, 47, 48, 50, 54, 55, 56, 57, 58, 65, 66, 67, 69, 70, 71, 72, 79, 95], "loss_weight_t": [8, 10, 12, 80, 87], "per": [8, 10, 12, 16, 35, 48, 69, 70, 71, 79, 81], "loss": [8, 10, 12, 62, 67, 69, 70, 71, 77, 79, 83], "weight": [8, 10, 12, 43, 62, 69, 70, 71, 74, 79, 81, 88, 99], "loss_weight_column": [8, 10, 12, 62, 80, 87], "also": [8, 10, 12, 39, 87], "assign": [8, 10, 12, 37, 45, 48, 97], "loss_weight_default_valu": [8, 10, 12, 62, 87], "float": [8, 10, 12, 43, 54, 61, 62, 63, 66, 74, 75, 77, 79, 80, 87], "note": [8, 10, 12, 75, 88], "valu": [8, 10, 12, 24, 27, 34, 35, 48, 62, 63, 75, 78, 79, 83, 85], "specifi": [8, 10, 12, 39, 45, 69, 70, 71, 75, 77, 99], "case": [8, 10, 12, 16, 43, 48, 69, 70, 71, 99], "That": [8, 10, 12, 55, 70, 78], "ignor": [8, 10, 12, 29], "seed": [8, 10, 12, 39, 62, 63, 80, 87], "resolv": [8, 10, 12, 39], "10000": [8, 10, 12, 39], "20": [8, 10, 12, 39, 
94], "graph_definit": [8, 10, 12, 43, 44, 59, 80, 87], "defin": [8, 10, 12, 39, 43, 48, 59, 60, 61, 62, 64, 85, 87, 88, 90], "represent": [8, 10, 12, 29, 48, 63], "from_config": [8, 66, 86, 87, 88], "concaten": [8, 27, 55], "query_t": [8, 10, 12], "sequential_index": [8, 10, 12], "some": [8, 10, 12, 62], "out": [8, 55, 67, 68, 79, 94, 97, 99], "sequenti": 8, "len": 8, "self": [8, 62, 74, 85, 90], "_may_": 8, "_indic": 8, "entir": [8, 66], "impos": 8, "befor": [8, 55, 69, 70, 71, 77], "scalar": [8, 72, 79], "length": [8, 29, 77], "element": [8, 27, 29, 67, 72, 89], "present": [8, 83, 92, 93], "add_label": 8, "fn": [8, 29, 85, 89], "kei": [8, 16, 27, 28, 29, 34, 35, 48, 78, 87, 88], "add": [8, 55, 83, 97, 99], "custom": [8, 62, 77], "concatdataset": 8, "singl": [8, 14, 48, 55, 78, 87, 88], "collect": [8, 13, 14, 26, 79, 95], "iter": 8, "parquetdataset": [9, 10], "pytorch": [10, 12, 77, 99], "sqlitedataset": [11, 12], "databas": [12, 32, 34, 35, 37, 74, 81, 99], "i3fram": [13, 14, 16, 28, 29, 43], "frame": [13, 14, 16, 26, 29, 34, 43], "i3extractorcollect": [13, 14], "i3featureextractoricecube86": [13, 15], "i3featureextractoricecubedeepcor": [13, 15], "i3featureextractoricecubeupgrad": [13, 15], "i3pulsenoisetruthflagicecubeupgrad": [13, 15], "i3galacticplanehybridrecoextractor": [13, 17], "i3ntmuonlabelextractor": [13, 18], "i3splinempeicextractor": [13, 23], "__call__": 14, "icetrai": [14, 28, 29, 43, 93], "keep": 14, "proven": 14, "set_fil": 14, "refer": [14, 87], "being": [14, 43, 69, 70, 71], "get": [14, 28, 77, 80, 99], "treat": 14, "86": [15, 51], "nois": [15, 28, 43], "flag": [15, 43], "exclude_kei": 16, "dynam": [16, 47, 55, 56, 57], "pars": [16, 75, 82, 83, 84, 85, 90], "call": [16, 29, 34, 48, 74, 81, 94], "tri": [16, 29], "automat": [16, 79, 97], "cast": [16, 29], "done": [16, 48, 94, 97], "recurs": [16, 29, 89, 92], "each": [16, 27, 29, 35, 37, 38, 45, 48, 51, 52, 55, 57, 61, 62, 63, 65, 66, 69, 70, 71, 72, 74, 75, 77, 92], "look": [16, 99], "member": [16, 29, 87, 88, 94], "variabl": [16, 29, 55, 72, 81, 94], "signatur": [16, 29], "similar": [16, 29, 99], "handl": [16, 79, 83, 94], "hand": 16, "mc": [16, 34, 35], "tree": [16, 34], "trigger": 16, "exclud": [16, 37, 99], "valueerror": [16, 66], "hybrid": 17, "galatict": 17, "plane": [17, 79], "tum": [18, 25], "dnn": [18, 25], "padding_valu": [18, 21], "northeren": 18, "i3particl": 19, "other": [19, 35, 61, 79, 97], "algorithm": 19, "comparison": [19, 79], "quantiti": [20, 69, 70, 71, 72], "queso": 21, "retro": [22, 32], "splinemp": 23, "border": 24, "mctree": [24, 28], "ndarrai": [24, 62, 81], "arrai": [24, 27], "boundari": 24, "volum": 24, "coordin": [24, 72], "particl": [24, 35, 78], "start": [24, 97, 99], "stop": [24, 83], "within": [24, 45, 47, 48, 55, 61], "hard": 24, "i3mctre": 24, "flatten_nested_dictionari": [26, 27], "serialis": [26, 27], "transpose_list_of_dict": [26, 27], "frame_is_montecarlo": [26, 28], "frame_is_nois": [26, 28], "get_om_keys_and_pulseseri": [26, 28], "is_boost_enum": [26, 29], "is_boost_class": [26, 29], "is_icecube_class": [26, 29], "is_typ": [26, 29], "is_method": [26, 29], "break_cyclic_recurs": [26, 29], "get_member_vari": [26, 29], "cast_object_to_pure_python": [26, 29], "cast_pulse_series_to_pure_python": [26, 29], "manipul": [27, 59, 60, 64], "obj": [27, 29, 89], "parent_kei": 27, "flatten": 27, "nest": 27, "dictionari": [27, 28, 29, 32, 34, 62, 63, 74, 75, 85, 87, 88, 90], "non": [27, 29, 34, 35, 79], "exampl": [27, 39, 45, 48, 79, 87, 88, 99], "d": [27, 62, 65, 97], "b": [27, 45, 48], "c": 
[27, 48, 79, 99], "2": [27, 48, 55, 57, 61, 63, 70, 72, 74, 75, 79, 87, 99], "a__b": 27, "applic": 27, "combin": [27, 87], "parent": 27, "__": [27, 29], "nester": 27, "json": [27, 87], "therefor": 27, "we": [27, 29, 39, 97, 99], "outer": 27, "abl": [27, 99], "de": 27, "transpos": 27, "mont": 28, "carlo": 28, "simul": [28, 43], "pulseseri": 28, "calibr": [28, 29], "gcd_dict": [28, 29], "p": [28, 34, 79], "om": [28, 29], "dataclass": 28, "i3calibr": 28, "indicesfor": 28, "boost": 29, "enum": 29, "ensur": [29, 38, 79, 94, 97, 99], "isn": 29, "return_discard": 29, "valid": [29, 39, 67, 69, 70, 71, 79, 83, 85, 90], "mangl": 29, "take": [29, 34, 48, 97], "mainli": 29, "cannot": [29, 85, 90], "trivial": [29, 71], "doe": [29, 88], "try": 29, "equival": 29, "its": 29, "like": [29, 48, 72, 79, 95, 97], "otherwis": [29, 79], "itself": [29, 69, 70, 71], "deem": 29, "wai": [29, 39, 97, 99], "optic": 29, "found": [29, 79], "parquetdataconvert": [30, 31], "module_dict": 32, "devic": 32, "retro_table_nam": 32, "n_worker": [32, 74], "pipeline_nam": 32, "creat": [32, 34, 35, 62, 85, 86, 90, 97, 99], "initialis": [32, 88], "gnn_module_for_energy_regress": 32, "modulelist": 32, "comput": [32, 67, 69, 70, 71, 72, 79], "directori": [32, 37, 74, 92], "100": [32, 99], "size": [32, 47, 48, 55, 56, 57, 83], "alreadi": [32, 35, 99], "error": [32, 79, 94, 97], "prompt": 32, "avoid": [32, 94, 97], "overwrit": [32, 77], "sqlitedataconvert": [33, 34, 99], "construct_datafram": [33, 34], "is_pulse_map": [33, 34], "is_mc_tre": [33, 34], "database_exist": [33, 35], "database_table_exist": [33, 35], "run_sql_cod": [33, 35], "save_to_sql": [33, 35], "attach_index": [33, 35], "create_t": [33, 35], "create_table_and_save_to_sql": [33, 35], "db": [34, 80], "max_table_s": 34, "maximum": [34, 48, 69, 70, 71, 83], "row": [34, 35], "exce": 34, "limit": [34, 79], "any_pulsemap_is_non_empti": 34, "data_dict": 34, "empti": [34, 43], "retriev": 34, "splitinicepuls": 34, "least": [34, 97, 99], "true": [34, 35, 43, 74, 77, 79, 81, 87, 88, 90], "becaus": [34, 38], "instead": [34, 79, 85, 90], "alwai": 34, "panda": [34, 39, 81], "datafram": [34, 35, 39, 66, 67, 74, 80, 81], "table_nam": [34, 35], "database_path": [35, 74, 81], "df": 35, "must": [35, 45, 77, 81, 97], "attach": 35, "default_typ": 35, "null": 35, "integer_primary_kei": 35, "NOT": [35, 79], "integ": [35, 55, 56, 79], "primari": 35, "Such": 35, "appropri": [35, 69, 70, 71], "expect": [35, 39, 43, 65], "doesn": 35, "parquettosqliteconvert": [36, 37], "pairwise_shuffl": [36, 38], "stringselectionresolv": [36, 39], "parquet_path": 37, "mc_truth_tabl": 37, "excluded_field": 37, "id": 37, "everi": [37, 99], "field": [37, 75, 78, 85, 87, 88, 90], "One": [37, 75], "choos": 37, "argument": [37, 81, 83, 85, 87, 88, 90], "exclude_field": 37, "database_nam": 37, "convers": [37, 99], "rng": 38, "relat": [38, 92], "i3_list": [38, 92], "gcd_list": [38, 92], "correpond": 38, "handi": 38, "even": 38, "files_list": 38, "gcd_shuffl": 38, "i3_shuffl": 38, "use_cach": 39, "flexibl": 39, "below": [39, 75, 81, 97, 99], "show": [39, 77], "involv": 39, "cover": 39, "yml": [39, 83, 87, 88], "test": [39, 69, 70, 71, 80, 87, 93, 97], "50000": [39, 87], "ab": [39, 79, 87], "12": [39, 87], "14": [39, 87], "16": [39, 87], "13": [39, 99], "compat": 39, "syntax": [39, 79], "mai": [39, 65, 99], "fix": 39, "randomli": [39, 62, 63, 88], "graphnet_modul": [40, 41], "graphneti3modul": [41, 43], "i3inferencemodul": [41, 43], "i3pulsecleanermodul": [41, 43], "pulsemap_extractor": 43, "produc": [43, 78, 81], "write": 
[43, 99], "constructor": 43, "knngraph": [43, 59, 63], "associ": [43, 62, 70, 79], "model_config": [43, 82, 84, 85, 87, 90], "state_dict": [43, 66], "model_nam": [43, 74], "prediction_column": [43, 66, 67, 80], "pulsmap": 43, "modelconfig": [43, 66, 84, 87, 88], "summar": 43, "Will": [43, 61], "help": [43, 83, 97], "entri": [43, 55, 75, 83], "dynedg": [43, 44, 53, 56, 57], "energy_reco": 43, "discard_empty_ev": 43, "clean": [43, 97, 99], "assum": [43, 50, 71, 72], "7": [43, 48, 74], "consid": [43, 99], "posit": [43, 48, 70], "signal": 43, "els": 43, "fals": [43, 55, 66, 74, 77, 79, 81, 87], "elimin": 43, "speed": 43, "especi": 43, "sinc": [43, 79], "further": 43, "calcul": [43, 61, 63, 67, 72, 78, 79], "convnet": [44, 53], "dynedge_jinst": [44, 53], "dynedge_kaggle_tito": [44, 53], "edg": [44, 47, 48, 55, 56, 57, 59, 62, 63, 64, 65, 72], "unbatch_edge_index": [44, 45], "attributecoarsen": [44, 45], "domcoarsen": [44, 45], "customdomcoarsen": [44, 45], "domandtimewindowcoarsen": [44, 45], "standardmodel": [44, 67], "calculate_xyzt_homophili": [44, 72], "calculate_distance_matrix": [44, 72], "knn_graph_batch": [44, 72], "oper": [45, 47, 53, 55], "cluster": [45, 47, 48, 55, 57], "local": [45, 83], "edge_index": [45, 47, 72], "vector": [45, 48, 79], "longtensor": [45, 48, 72], "mathbf": [45, 48], "ldot": [45, 48], "n": [45, 48, 79], "reduce_opt": 45, "avg": 45, "avg_pool": 45, "avg_pool_x": 45, "max": [45, 47, 55, 57, 79, 83], "max_pool": [45, 48], "max_pool_x": [45, 48], "min": [45, 48, 55, 57], "min_pool": [45, 46, 48], "min_pool_x": [45, 46, 48], "sum": [45, 48, 55, 57, 67], "sum_pool": [45, 46, 48], "sum_pool_x": [45, 46, 48], "forward": [45, 47, 50, 54, 55, 56, 57, 58, 61, 62, 65, 66, 67, 71, 79], "simplecoarsen": 45, "addit": [45, 47, 66, 67, 79, 81], "window": 45, "time_window": 45, "dynedgeconv": [46, 47, 55], "edgeconvtito": [46, 47], "dyntran": [46, 47, 57], "sum_pool_and_distribut": [46, 48], "group_bi": [46, 48], "group_pulses_to_dom": [46, 48], "group_pulses_to_pmt": [46, 48], "std_pool_x": [46, 48], "std_pool": [46, 48], "aggr": 47, "nb_neighbor": 47, "features_subset": [47, 55, 57], "edgeconv": 47, "lightningmodul": [47, 66, 77, 94], "convolut": [47, 54, 55, 56, 57], "mlp": [47, 55], "aggreg": [47, 48], "8": [47, 48, 55, 63, 79, 97, 99], "neighbour": [47, 55, 57, 61, 63, 72], "after": [47, 55, 77, 83, 87], "sequenc": 47, "slice": [47, 55, 57], "sparsetensor": 47, "messagepass": 47, "tito": [47, 57], "solut": [47, 57, 97], "deep": [47, 57], "competit": [47, 51, 57], "reset_paramet": 47, "reset": 47, "learnabl": [47, 53, 54, 55, 56, 57, 58], "messag": [47, 77, 94], "x_i": 47, "x_j": 47, "layer_s": 47, "n_head": 47, "dyntrans1": 47, "head": 47, "multiheadattent": 47, "just": [48, 99], "negat": 48, "cluster_index": 48, "distribut": [48, 55, 70, 79, 81], "ident": [48, 71], "pmt": 48, "f1": 48, "f2": 48, "6": [48, 75], "groupbi": 48, "3": [48, 54, 57, 70, 72, 74, 75, 79, 97, 99], "matrix": [48, 61, 72, 79], "mathbb": 48, "r": [48, 61, 99], "n_1": 48, "n_b": 48, "obtain": [48, 79], "wise": 48, "dens": 48, "fc": 48, "known": 48, "std": 48, "repres": [48, 62, 63, 65, 85, 87, 88], "averag": [48, 79], "torch_geometr": 48, "version": [48, 69, 70, 71, 77, 97, 99], "standardis": 49, "icecubekaggl": [49, 51], "icecubedeepcor": [49, 51], "icecubeupgrad": [49, 51], "ins": 50, "feature_map": [50, 51, 52], "node_featur": [50, 62], "node_feature_nam": [50, 62, 63, 65], "adjac": 50, "dimens": [51, 52, 54, 55, 57, 79], "prototyp": 52, "dynedgejinst": [53, 56], "dynedgetito": [53, 57], "author": [54, 
56, 79], "martin": 54, "minh": 54, "nb_input": [54, 55, 56, 57, 58, 69, 70, 71], "nb_output": [54, 56, 58, 65, 69, 71], "nb_intermedi": 54, "128": [54, 55, 83], "dropout_ratio": 54, "fraction": 54, "drop": 54, "nb_neighbour": 55, "k": [55, 57, 61, 63, 72, 79], "nearest": [55, 57, 61, 63, 72], "latent": [55, 57, 69], "metric": [55, 57, 77], "dynedge_layer_s": 55, "dimenion": [55, 57], "multi": 55, "perceptron": 55, "256": 55, "336": 55, "post_processing_layer_s": 55, "hidden": [55, 56, 69, 71], "skip": 55, "readout_layer_s": 55, "post": 55, "_and_": 55, "As": 55, "last": [55, 69, 71, 77], "global_pooling_schem": [55, 57], "scheme": [55, 57], "add_global_variables_after_pool": 55, "altern": [55, 79, 97], "exact": [56, 79], "2209": 56, "03042": 56, "oerso": 56, "layer_size_scal": 56, "4": [56, 57, 70, 75], "scale": [56, 62, 69, 70, 71, 79], "ic": 57, "univers": 57, "south": 57, "pole": 57, "dyntrans_layer_s": 57, "core": 58, "edgedefinit": [59, 60, 61, 62, 64], "how": [59, 60, 64], "drawn": [59, 60, 63, 64], "between": [59, 60, 61, 64, 67, 72, 77, 79, 87, 88], "knnedg": [60, 61], "radialedg": [60, 61], "euclideanedg": [60, 61], "_construct_edg": 61, "definit": [61, 62, 63, 65, 66, 97], "nb_nearest_neighbour": [61, 63], "space": [61, 81], "distanc": [61, 63, 72], "sphere": 61, "chosen": [61, 94], "radiu": 61, "centr": 61, "radial": 61, "center": 61, "euclidean": [61, 97], "see": [61, 62, 77, 97, 99], "http": [61, 62, 79, 97], "arxiv": [61, 79], "org": [61, 79, 99], "pdf": 61, "1809": 61, "06166": 61, "hold": 62, "alter": 62, "dure": [62, 69, 70, 71, 77], "geometri": 62, "node_definit": [62, 63], "edge_definit": 62, "nodedefinit": [62, 63, 64, 65], "nodesaspuls": [62, 63, 64, 65], "perturbation_dict": [62, 63], "deviat": [62, 63], "perturb": [62, 63], "truth_dict": 62, "custom_label_funct": 62, "loss_weight": [62, 69, 70, 71], "data_path": 62, "shape": [62, 65, 72, 79], "num_nod": 62, "github": [62, 79, 99], "com": [62, 79, 99], "team": [62, 97], "blob": [62, 79], "getting_start": 62, "md": 62, "where": [62, 63, 65, 78], "your": [63, 97, 99], "num_puls": 65, "overridden": 65, "set_number_of_input": 65, "measur": [65, 72], "cherenkov": 65, "radiat": 65, "train_dataload": 66, "val_dataload": 66, "max_epoch": 66, "gpu": [66, 67, 83, 99], "ckpt_path": 66, "log_every_n_step": 66, "gradient_clip_v": 66, "distribution_strategi": [66, 67], "trainer_kwarg": 66, "pytorch_lightn": [66, 94], "trainer": [66, 77, 80], "predict_as_datafram": [66, 67], "additional_attribut": [66, 67, 80], "save_state_dict": 66, "load_state_dict": 66, "karg": 66, "trust": 66, "enough": 66, "eval": [66, 99], "lambda": 66, "consequ": 66, "target_label": [67, 69, 70, 71], "target": [67, 69, 70, 71, 79, 90], "prediction_label": [67, 69, 70, 71], "configure_optim": 67, "optim": [67, 77], "shared_step": 67, "batch_idx": 67, "share": 67, "step": [67, 77], "training_step": 67, "train_batch": 67, "validation_step": 67, "val_batch": 67, "compute_loss": [67, 69, 70, 71], "pred": [67, 71], "verbos": [67, 77], "activ": [67, 71, 97, 99], "mode": [67, 71], "deactiv": [67, 71], "multiclassclassificationtask": [68, 69], "binaryclassificationtask": [68, 69], "binaryclassificationtasklogit": [68, 69], "azimuthreconstructionwithkappa": [68, 70], "azimuthreconstruct": [68, 70], "directionreconstructionwithkappa": [68, 70], "zenithreconstruct": [68, 70], "zenithreconstructionwithkappa": [68, 70], "energyreconstruct": [68, 70], "energyreconstructionwithpow": [68, 70], "energyreconstructionwithuncertainti": [68, 70], "vertexreconstruct": [68, 70], 
"positionreconstruct": [68, 70], "timereconstruct": [68, 70], "inelasticityreconstruct": [68, 70], "identitytask": [68, 69, 71], "classifi": 69, "untransform": 69, "logit": [69, 79], "affin": [69, 70, 71], "binari": [69, 79], "hidden_s": [69, 70, 71], "feed": [69, 70, 71], "lossfunct": [69, 70, 71, 76, 79], "auto": [69, 70, 71], "matic": [69, 70, 71], "_pred": [69, 70, 71], "transform_prediction_and_target": [69, 70, 71], "numer": [69, 70, 71], "stabl": [69, 70, 71], "transform_target": [69, 70, 71], "log10": [69, 70, 71, 81], "rather": [69, 70, 71, 94], "conjunct": [69, 70, 71], "transform_infer": [69, 70, 71], "invers": [69, 70, 71], "recov": [69, 70, 71], "transform_support": [69, 70, 71], "minimum": [69, 70, 71], "restrict": [69, 70, 71, 79], "invert": [69, 70, 71], "1e6": [69, 70, 71], "default_target_label": [69, 70, 71], "default_prediction_label": [69, 70, 71], "target_pr": 69, "angl": [70, 78], "kappa": [70, 79], "var": 70, "azimuth_pr": 70, "azimuth_kappa": 70, "3d": [70, 79], "vmf": 70, "dir_x_pr": 70, "dir_y_pr": 70, "dir_z_pr": 70, "direction_kappa": 70, "zenith_pr": 70, "zenith_kappa": 70, "energy_pr": 70, "uncertainti": 70, "energy_sigma": 70, "vertex": 70, "position_x_pr": 70, "position_y_pr": 70, "position_z_pr": 70, "interaction_time_pr": 70, "interact": 70, "hadron": 70, "inelasticity_pr": 70, "wrt": 71, "train_ev": 71, "xyzt": 72, "homophili": 72, "notic": [72, 79], "xyz_coord": 72, "pairwis": 72, "nb_dom": 72, "updat": [72, 74, 77], "config_updat": [73, 74], "weightfitt": [73, 74, 76, 81], "contourfitt": [73, 74], "read_entri": [73, 75], "plot_2d_contour": [73, 75], "plot_1d_contour": [73, 75], "contour": [74, 75], "config_path": 74, "new_config_path": 74, "dummy_sect": 74, "temp": 74, "dummi": 74, "section": 74, "header": 74, "configupdat": 74, "programat": 74, "statistical_fit": 74, "fit_weight": [74, 81], "config_outdir": 74, "weight_nam": [74, 81], "pisa_config_dict": 74, "add_to_databas": [74, 81], "flux": 74, "_database_path": 74, "statist": 74, "effect": [74, 77, 97], "account": 74, "systemat": 74, "hypersurfac": 74, "chang": [74, 79, 97], "assumpt": 74, "regard": 74, "pipeline_path": 74, "post_fix": 74, "include_retro": 74, "fit_1d_contour": 74, "run_nam": 74, "config_dict": 74, "grid_siz": 74, "theta23_minmax": 74, "36": 74, "54": 74, "dm31_minmax": 74, "1d": [74, 75], "fit_2d_contour": 74, "2d": [74, 75, 79], "content": 75, "contour_data": 75, "xlim": 75, "ylim": 75, "0023799999999999997": 75, "0025499999999999997": 75, "chi2_critical_valu": 75, "width": 75, "height": 75, "path_to_pisa_fit_result": 75, "name_of_my_model_in_fit": 75, "legend": 75, "color": 75, "linestyl": 75, "style": [75, 97], "line": [75, 77, 83], "upper": 75, "axi": 75, "605": 75, "critic": [75, 94], "chi2": 75, "90": 75, "cl": 75, "right": [75, 79], "176": 75, "inch": 75, "388": 75, "706": 75, "abov": [75, 79, 81, 99], "352": 75, "piecewiselinearlr": [76, 77], "progressbar": [76, 77], "mseloss": [76, 79], "rmseloss": [76, 79], "logcoshloss": [76, 79], "crossentropyloss": [76, 79], "binarycrossentropyloss": [76, 79], "logcmk": [76, 79], "vonmisesfisherloss": [76, 79], "vonmisesfisher2dloss": [76, 79], "euclideandistanceloss": [76, 79], "vonmisesfisher3dloss": [76, 79], "make_dataload": [76, 80], "make_train_validation_dataload": [76, 80], "get_predict": [76, 80], "save_result": [76, 80], "uniform": [76, 81], "bjoernlow": [76, 81], "mileston": 77, "factor": 77, "last_epoch": 77, "_lrschedul": 77, "interpol": 77, "linearli": 77, "denot": 77, "multipli": 77, "closest": 77, "vice": 77, "versa": 
77, "wrap": [77, 87, 88], "epoch": [77, 83], "print": [77, 94], "stdout": 77, "get_lr": 77, "refresh_r": 77, "process_posit": 77, "tqdmprogressbar": 77, "progress": 77, "bar": 77, "customis": 77, "lightn": 77, "init_validation_tqdm": 77, "overrid": 77, "init_predict_tqdm": 77, "init_test_tqdm": 77, "init_train_tqdm": 77, "get_metr": 77, "on_train_epoch_start": 77, "previou": 77, "behaviour": 77, "on_train_epoch_end": 77, "don": [77, 99], "duplciat": 77, "runtim": [78, 99], "azimuth_kei": 78, "zenith_kei": 78, "access": [78, 99], "azimiuth": 78, "return_el": 79, "elementwis": 79, "term": 79, "squar": 79, "root": [79, 99], "cosh": 79, "act": 79, "small": 79, "cross": 79, "entropi": 79, "num_class": 79, "softmax": 79, "ed": 79, "probabl": 79, "mit": 79, "licens": 79, "copyright": 79, "2019": 79, "ryabinin": 79, "permiss": 79, "herebi": 79, "person": 79, "copi": 79, "document": 79, "deal": 79, "modifi": 79, "publish": 79, "sublicens": 79, "sell": 79, "permit": 79, "whom": 79, "furnish": 79, "so": [79, 99], "subject": 79, "condit": 79, "shall": 79, "substanti": 79, "portion": 79, "THE": 79, "AS": 79, "warranti": 79, "OF": 79, "kind": 79, "OR": 79, "impli": 79, "BUT": 79, "TO": 79, "merchant": 79, "FOR": 79, "particular": [79, 97], "AND": 79, "noninfring": 79, "IN": 79, "NO": 79, "holder": 79, "BE": 79, "liabl": 79, "claim": 79, "damag": 79, "liabil": 79, "action": 79, "contract": 79, "tort": 79, "aris": 79, "WITH": 79, "_____________________": 79, "mryab": 79, "vmf_loss": 79, "master": 79, "py": [79, 99], "bessel": 79, "exponenti": 79, "ditto": 79, "iv": 79, "1812": 79, "04616": 79, "spite": 79, "suggest": 79, "sec": 79, "paper": 79, "m": 79, "correct": 79, "static": [79, 97], "ctx": 79, "backward": 79, "grad_output": 79, "von": 79, "mise": 79, "fisher": 79, "log_cmk_exact": 79, "c_": 79, "exactli": [79, 94], "log_cmk_approx": 79, "approx": 79, "minu": 79, "sign": 79, "log_cmk": 79, "kappa_switch": 79, "diverg": 79, "700": 79, "float64": 79, "precis": 79, "unaccur": 79, "switch": 79, "three": 79, "database_indic": 80, "test_siz": 80, "node_level": 80, "tag": [80, 97, 99], "archiv": 80, "public": 81, "uniformweightfitt": 81, "bin": 81, "privat": 81, "_fit_weight": 81, "sql": 81, "desir": [81, 92], "np": 81, "happen": 81, "x_low": 81, "wherea": 81, "curv": 81, "base_config": [82, 84], "dataset_config": [82, 84], "training_config": [82, 84], "argumentpars": [82, 83], "is_gcd_fil": [82, 92], "is_i3_fil": [82, 92], "has_extens": [82, 92], "find_i3_fil": [82, 92], "has_icecube_packag": [82, 93], "has_torch_packag": [82, 93], "has_pisa_packag": [82, 93], "requires_icecub": [82, 93], "repeatfilt": [82, 94], "eps_lik": [82, 95], "consist": [83, 94, 97], "cli": 83, "pop_default": 83, "usag": 83, "descript": 83, "command": [83, 99], "standard_argu": 83, "home": [83, 99], "runner": 83, "lib": [83, 99], "python3": 83, "training_example_data_sqlit": 83, "earli": 83, "patienc": 83, "narg": 83, "50": 83, "example_energy_reconstruction_model": 83, "num": 83, "fetch": 83, "with_standard_argu": 83, "overwritten": [83, 85], "baseconfig": [84, 85, 86, 87, 88, 90], "get_all_argument_valu": [84, 85], "save_dataset_config": [84, 87], "datasetconfigsavermeta": [84, 87], "datasetconfigsaverabcmeta": [84, 87], "save_model_config": [84, 88], "modelconfigsavermeta": [84, 88], "modelconfigsaverabc": [84, 88], "traverse_and_appli": [84, 89], "list_all_submodul": [84, 89], "get_all_grapnet_class": [84, 89], "is_graphnet_modul": [84, 89], "is_graphnet_class": [84, 89], "get_graphnet_class": [84, 89], "trainingconfig": [84, 
90], "basemodel": [85, 87, 88], "keyword": [85, 90], "validationerror": [85, 90], "pydantic_cor": [85, 90], "__init__": [85, 87, 88, 90, 99], "__pydantic_self__": [85, 90], "dump": [85, 87, 88], "yaml": [85, 86], "as_dict": [85, 87, 88], "classvar": [85, 87, 88, 90], "configdict": [85, 87, 88, 90], "conform": [85, 87, 88, 90], "pydant": [85, 87, 88, 90], "model_field": [85, 87, 88, 90], "fieldinfo": [85, 87, 88, 90], "metadata": [85, 87, 88, 90], "about": [85, 87, 88, 90], "__fields__": [85, 87, 88, 90], "v1": [85, 87, 88, 90, 99], "re": [86, 99], "save_config": 86, "dataconfig": 87, "transpar": [87, 88, 97], "reproduc": [87, 88], "In": [87, 88, 99], "session": [87, 88], "anoth": [87, 88], "you": [87, 88, 97, 99], "still": 87, "csv": 87, "train_select": 87, "test_select": 87, "unambigu": [87, 88], "annot": [87, 88, 90], "nonetyp": 87, "init_fn": [87, 88], "metaclass": [87, 88], "abcmeta": [87, 88], "datasetconfigsav": 87, "trainabl": 88, "hyperparamet": 88, "instanti": 88, "thu": 88, "modelconfigsav": 88, "fn_kwarg": 89, "structur": 89, "moduletyp": 89, "grapnet": 89, "lookup": 89, "early_stopping_pati": 90, "system": [92, 99], "filenam": 92, "dir": 92, "search": 92, "test_funct": 93, "filter": 94, "repeat": 94, "nb_repeats_allow": 94, "record": 94, "logrecord": 94, "log_fold": 94, "clear": 94, "intuit": 94, "composit": 94, "loggeradapt": 94, "clash": 94, "setlevel": 94, "deleg": 94, "msg": 94, "warn": 94, "info": [94, 99], "debug": 94, "warning_onc": 94, "onc": 94, "handler": 94, "file_handl": 94, "filehandl": 94, "stream_handl": 94, "streamhandl": 94, "assort": 95, "ep": 95, "api": 96, "To": [97, 99], "sure": [97, 99], "smooth": 97, "guidelin": 97, "guid": 97, "encourag": 97, "contributor": 97, "discuss": 97, "bug": 97, "anyth": 97, "place": 97, "describ": 97, "yourself": 97, "ownership": 97, "prioriti": 97, "situat": 97, "lot": 97, "effort": 97, "go": 97, "turn": 97, "outsid": 97, "scope": 97, "better": 97, "fork": 97, "repo": 97, "dedic": 97, "branch": [97, 99], "repositori": 97, "own": [97, 99], "accept": 97, "autom": 97, "review": 97, "pep8": 97, "docstr": 97, "googl": 97, "hint": 97, "adher": 97, "pep": 97, "pylint": 97, "flake8": 97, "black": 97, "well": 97, "recommend": [97, 99], "mypi": 97, "pydocstyl": 97, "docformatt": 97, "commit": 97, "hook": 97, "instal": 97, "come": 97, "pip": [97, 99], "Then": 97, "everytim": 97, "pep257": 97, "concept": 97, "ljvmiranda921": 97, "io": 97, "notebook": 97, "2018": 97, "06": 97, "21": 97, "precommit": 97, "environ": 99, "virtual": 99, "anaconda": 99, "prove": 99, "instruct": 99, "setup": 99, "want": 99, "part": 99, "achiev": 99, "bash": 99, "shell": 99, "cvmf": 99, "opensciencegrid": 99, "py3": 99, "v4": 99, "sh": 99, "rhel_7_x86_64": 99, "metaproject": 99, "env": 99, "alia": 99, "script": 99, "With": 99, "now": 99, "light": 99, "extra": 99, "geometr": 99, "won": 99, "later": 99, "torch_cpu": 99, "txt": 99, "cpu": 99, "torch_gpu": 99, "prefer": 99, "unix": 99, "git": 99, "clone": 99, "usernam": 99, "cd": 99, "conda": 99, "gcc_linux": 99, "64": 99, "gxx_linux": 99, "libgcc": 99, "cudatoolkit": 99, "11": 99, "forg": 99, "torch_maco": 99, "On": 99, "maco": 99, "box": 99, "compil": 99, "gcc": 99, "date": 99, "possibli": 99, "cuda": 99, "toolkit": 99, "recent": 99, "omit": 99, "newer": 99, "export": 99, "ld_library_path": 99, "anaconda3": 99, "miniconda3": 99, "bashrc": 99, "librari": 99, "intend": 99, "rm": 99, "asogaard": 99, "latest": 99, "dc423315742c": 99, "01_icetrai": 99, "01_convert_i3_fil": 99, "2023": 99, "01": 99, "24": 99, "41": 99, 
"27": 99, "graphnet_20230124": 99, "134127": 99, "46": 99, "convert_i3_fil": 99, "ic86": 99, "thread": 99, "00": 99, "79": 99, "42": 99, "26": 99, "413": 99, "88it": 99, "specialis": 99, "ones": 99, "push": 99, "vx": 99}, "objects": {"": [[1, 0, 0, "-", "graphnet"]], "graphnet": [[2, 0, 0, "-", "constants"], [3, 0, 0, "-", "data"], [40, 0, 0, "-", "deployment"], [44, 0, 0, "-", "models"], [73, 0, 0, "-", "pisa"], [76, 0, 0, "-", "training"], [82, 0, 0, "-", "utilities"]], "graphnet.data": [[4, 0, 0, "-", "constants"], [5, 0, 0, "-", "dataconverter"], [6, 0, 0, "-", "dataloader"], [7, 0, 0, "-", "dataset"], [13, 0, 0, "-", "extractors"], [30, 0, 0, "-", "parquet"], [32, 0, 0, "-", "pipeline"], [33, 0, 0, "-", "sqlite"], [36, 0, 0, "-", "utilities"]], "graphnet.data.constants": [[4, 1, 1, "", "FEATURES"], [4, 1, 1, "", "TRUTH"]], "graphnet.data.constants.FEATURES": [[4, 2, 1, "", "DEEPCORE"], [4, 2, 1, "", "ICECUBE86"], [4, 2, 1, "", "KAGGLE"], [4, 2, 1, "", "PROMETHEUS"], [4, 2, 1, "", "UPGRADE"]], "graphnet.data.constants.TRUTH": [[4, 2, 1, "", "DEEPCORE"], [4, 2, 1, "", "ICECUBE86"], [4, 2, 1, "", "KAGGLE"], [4, 2, 1, "", "PROMETHEUS"], [4, 2, 1, "", "UPGRADE"]], "graphnet.data.dataconverter": [[5, 1, 1, "", "DataConverter"], [5, 1, 1, "", "FileSet"], [5, 5, 1, "", "cache_output_files"], [5, 5, 1, "", "init_global_index"]], "graphnet.data.dataconverter.DataConverter": [[5, 3, 1, "", "execute"], [5, 4, 1, "", "file_suffix"], [5, 3, 1, "", "get_map_function"], [5, 3, 1, "", "merge_files"], [5, 3, 1, "", "save_data"]], "graphnet.data.dataconverter.FileSet": [[5, 2, 1, "", "gcd_file"], [5, 2, 1, "", "i3_file"]], "graphnet.data.dataloader": [[6, 1, 1, "", "DataLoader"], [6, 5, 1, "", "collate_fn"], [6, 5, 1, "", "do_shuffle"]], "graphnet.data.dataloader.DataLoader": [[6, 3, 1, "", "from_dataset_config"]], "graphnet.data.dataset": [[8, 0, 0, "-", "dataset"], [9, 0, 0, "-", "parquet"], [11, 0, 0, "-", "sqlite"]], "graphnet.data.dataset.dataset": [[8, 6, 1, "", "ColumnMissingException"], [8, 1, 1, "", "Dataset"], [8, 1, 1, "", "EnsembleDataset"], [8, 5, 1, "", "load_module"], [8, 5, 1, "", "parse_graph_definition"]], "graphnet.data.dataset.dataset.Dataset": [[8, 3, 1, "", "add_label"], [8, 3, 1, "", "concatenate"], [8, 3, 1, "", "from_config"], [8, 4, 1, "", "path"], [8, 3, 1, "", "query_table"], [8, 4, 1, "", "truth_table"]], "graphnet.data.dataset.parquet": [[10, 0, 0, "-", "parquet_dataset"]], "graphnet.data.dataset.parquet.parquet_dataset": [[10, 1, 1, "", "ParquetDataset"]], "graphnet.data.dataset.parquet.parquet_dataset.ParquetDataset": [[10, 3, 1, "", "query_table"]], "graphnet.data.dataset.sqlite": [[12, 0, 0, "-", "sqlite_dataset"]], "graphnet.data.dataset.sqlite.sqlite_dataset": [[12, 1, 1, "", "SQLiteDataset"]], "graphnet.data.dataset.sqlite.sqlite_dataset.SQLiteDataset": [[12, 3, 1, "", "query_table"]], "graphnet.data.extractors": [[14, 0, 0, "-", "i3extractor"], [15, 0, 0, "-", "i3featureextractor"], [16, 0, 0, "-", "i3genericextractor"], [17, 0, 0, "-", "i3hybridrecoextractor"], [18, 0, 0, "-", "i3ntmuonlabelsextractor"], [19, 0, 0, "-", "i3particleextractor"], [20, 0, 0, "-", "i3pisaextractor"], [21, 0, 0, "-", "i3quesoextractor"], [22, 0, 0, "-", "i3retroextractor"], [23, 0, 0, "-", "i3splinempeextractor"], [24, 0, 0, "-", "i3truthextractor"], [25, 0, 0, "-", "i3tumextractor"], [26, 0, 0, "-", "utilities"]], "graphnet.data.extractors.i3extractor": [[14, 1, 1, "", "I3Extractor"], [14, 1, 1, "", "I3ExtractorCollection"]], "graphnet.data.extractors.i3extractor.I3Extractor": [[14, 4, 
1, "", "name"], [14, 3, 1, "", "set_files"]], "graphnet.data.extractors.i3extractor.I3ExtractorCollection": [[14, 3, 1, "", "set_files"]], "graphnet.data.extractors.i3featureextractor": [[15, 1, 1, "", "I3FeatureExtractor"], [15, 1, 1, "", "I3FeatureExtractorIceCube86"], [15, 1, 1, "", "I3FeatureExtractorIceCubeDeepCore"], [15, 1, 1, "", "I3FeatureExtractorIceCubeUpgrade"], [15, 1, 1, "", "I3PulseNoiseTruthFlagIceCubeUpgrade"]], "graphnet.data.extractors.i3genericextractor": [[16, 1, 1, "", "I3GenericExtractor"]], "graphnet.data.extractors.i3hybridrecoextractor": [[17, 1, 1, "", "I3GalacticPlaneHybridRecoExtractor"]], "graphnet.data.extractors.i3ntmuonlabelsextractor": [[18, 1, 1, "", "I3NTMuonLabelExtractor"]], "graphnet.data.extractors.i3particleextractor": [[19, 1, 1, "", "I3ParticleExtractor"]], "graphnet.data.extractors.i3pisaextractor": [[20, 1, 1, "", "I3PISAExtractor"]], "graphnet.data.extractors.i3quesoextractor": [[21, 1, 1, "", "I3QUESOExtractor"]], "graphnet.data.extractors.i3retroextractor": [[22, 1, 1, "", "I3RetroExtractor"]], "graphnet.data.extractors.i3splinempeextractor": [[23, 1, 1, "", "I3SplineMPEICExtractor"]], "graphnet.data.extractors.i3truthextractor": [[24, 1, 1, "", "I3TruthExtractor"]], "graphnet.data.extractors.i3tumextractor": [[25, 1, 1, "", "I3TUMExtractor"]], "graphnet.data.extractors.utilities": [[27, 0, 0, "-", "collections"], [28, 0, 0, "-", "frames"], [29, 0, 0, "-", "types"]], "graphnet.data.extractors.utilities.collections": [[27, 5, 1, "", "flatten_nested_dictionary"], [27, 5, 1, "", "serialise"], [27, 5, 1, "", "transpose_list_of_dicts"]], "graphnet.data.extractors.utilities.frames": [[28, 5, 1, "", "frame_is_montecarlo"], [28, 5, 1, "", "frame_is_noise"], [28, 5, 1, "", "get_om_keys_and_pulseseries"]], "graphnet.data.extractors.utilities.types": [[29, 5, 1, "", "break_cyclic_recursion"], [29, 5, 1, "", "cast_object_to_pure_python"], [29, 5, 1, "", "cast_pulse_series_to_pure_python"], [29, 5, 1, "", "get_member_variables"], [29, 5, 1, "", "is_boost_class"], [29, 5, 1, "", "is_boost_enum"], [29, 5, 1, "", "is_icecube_class"], [29, 5, 1, "", "is_method"], [29, 5, 1, "", "is_type"]], "graphnet.data.parquet": [[31, 0, 0, "-", "parquet_dataconverter"]], "graphnet.data.parquet.parquet_dataconverter": [[31, 1, 1, "", "ParquetDataConverter"]], "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter": [[31, 2, 1, "", "file_suffix"], [31, 3, 1, "", "merge_files"], [31, 3, 1, "", "save_data"]], "graphnet.data.pipeline": [[32, 1, 1, "", "InSQLitePipeline"]], "graphnet.data.sqlite": [[34, 0, 0, "-", "sqlite_dataconverter"], [35, 0, 0, "-", "sqlite_utilities"]], "graphnet.data.sqlite.sqlite_dataconverter": [[34, 1, 1, "", "SQLiteDataConverter"], [34, 5, 1, "", "construct_dataframe"], [34, 5, 1, "", "is_mc_tree"], [34, 5, 1, "", "is_pulse_map"]], "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter": [[34, 3, 1, "", "any_pulsemap_is_non_empty"], [34, 2, 1, "", "file_suffix"], [34, 3, 1, "", "merge_files"], [34, 3, 1, "", "save_data"]], "graphnet.data.sqlite.sqlite_utilities": [[35, 5, 1, "", "attach_index"], [35, 5, 1, "", "create_table"], [35, 5, 1, "", "create_table_and_save_to_sql"], [35, 5, 1, "", "database_exists"], [35, 5, 1, "", "database_table_exists"], [35, 5, 1, "", "run_sql_code"], [35, 5, 1, "", "save_to_sql"]], "graphnet.data.utilities": [[37, 0, 0, "-", "parquet_to_sqlite"], [38, 0, 0, "-", "random"], [39, 0, 0, "-", "string_selection_resolver"]], "graphnet.data.utilities.parquet_to_sqlite": [[37, 1, 1, "", 
"ParquetToSQLiteConverter"]], "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter": [[37, 3, 1, "", "run"]], "graphnet.data.utilities.random": [[38, 5, 1, "", "pairwise_shuffle"]], "graphnet.data.utilities.string_selection_resolver": [[39, 1, 1, "", "StringSelectionResolver"]], "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver": [[39, 3, 1, "", "resolve"]], "graphnet.deployment.i3modules": [[43, 0, 0, "-", "graphnet_module"]], "graphnet.deployment.i3modules.graphnet_module": [[43, 1, 1, "", "GraphNeTI3Module"], [43, 1, 1, "", "I3InferenceModule"], [43, 1, 1, "", "I3PulseCleanerModule"]], "graphnet.models": [[45, 0, 0, "-", "coarsening"], [46, 0, 0, "-", "components"], [49, 0, 0, "-", "detector"], [53, 0, 0, "-", "gnn"], [59, 0, 0, "-", "graphs"], [66, 0, 0, "-", "model"], [67, 0, 0, "-", "standard_model"], [68, 0, 0, "-", "task"], [72, 0, 0, "-", "utils"]], "graphnet.models.coarsening": [[45, 1, 1, "", "AttributeCoarsening"], [45, 1, 1, "", "Coarsening"], [45, 1, 1, "", "CustomDOMCoarsening"], [45, 1, 1, "", "DOMAndTimeWindowCoarsening"], [45, 1, 1, "", "DOMCoarsening"], [45, 5, 1, "", "unbatch_edge_index"]], "graphnet.models.coarsening.Coarsening": [[45, 3, 1, "", "forward"], [45, 2, 1, "", "reduce_options"]], "graphnet.models.components": [[47, 0, 0, "-", "layers"], [48, 0, 0, "-", "pool"]], "graphnet.models.components.layers": [[47, 1, 1, "", "DynEdgeConv"], [47, 1, 1, "", "DynTrans"], [47, 1, 1, "", "EdgeConvTito"]], "graphnet.models.components.layers.DynEdgeConv": [[47, 3, 1, "", "forward"]], "graphnet.models.components.layers.DynTrans": [[47, 3, 1, "", "forward"]], "graphnet.models.components.layers.EdgeConvTito": [[47, 3, 1, "", "forward"], [47, 3, 1, "", "message"], [47, 3, 1, "", "reset_parameters"]], "graphnet.models.components.pool": [[48, 5, 1, "", "group_by"], [48, 5, 1, "", "group_pulses_to_dom"], [48, 5, 1, "", "group_pulses_to_pmt"], [48, 5, 1, "", "min_pool"], [48, 5, 1, "", "min_pool_x"], [48, 5, 1, "", "std_pool"], [48, 5, 1, "", "std_pool_x"], [48, 5, 1, "", "sum_pool"], [48, 5, 1, "", "sum_pool_and_distribute"], [48, 5, 1, "", "sum_pool_x"]], "graphnet.models.detector": [[50, 0, 0, "-", "detector"], [51, 0, 0, "-", "icecube"], [52, 0, 0, "-", "prometheus"]], "graphnet.models.detector.detector": [[50, 1, 1, "", "Detector"]], "graphnet.models.detector.detector.Detector": [[50, 3, 1, "", "feature_map"], [50, 3, 1, "", "forward"]], "graphnet.models.detector.icecube": [[51, 1, 1, "", "IceCube86"], [51, 1, 1, "", "IceCubeDeepCore"], [51, 1, 1, "", "IceCubeKaggle"], [51, 1, 1, "", "IceCubeUpgrade"]], "graphnet.models.detector.icecube.IceCube86": [[51, 3, 1, "", "feature_map"]], "graphnet.models.detector.icecube.IceCubeDeepCore": [[51, 3, 1, "", "feature_map"]], "graphnet.models.detector.icecube.IceCubeKaggle": [[51, 3, 1, "", "feature_map"]], "graphnet.models.detector.icecube.IceCubeUpgrade": [[51, 3, 1, "", "feature_map"]], "graphnet.models.detector.prometheus": [[52, 1, 1, "", "Prometheus"]], "graphnet.models.detector.prometheus.Prometheus": [[52, 3, 1, "", "feature_map"]], "graphnet.models.gnn": [[54, 0, 0, "-", "convnet"], [55, 0, 0, "-", "dynedge"], [56, 0, 0, "-", "dynedge_jinst"], [57, 0, 0, "-", "dynedge_kaggle_tito"], [58, 0, 0, "-", "gnn"]], "graphnet.models.gnn.convnet": [[54, 1, 1, "", "ConvNet"]], "graphnet.models.gnn.convnet.ConvNet": [[54, 3, 1, "", "forward"]], "graphnet.models.gnn.dynedge": [[55, 1, 1, "", "DynEdge"]], "graphnet.models.gnn.dynedge.DynEdge": [[55, 3, 1, "", "forward"]], 
"graphnet.models.gnn.dynedge_jinst": [[56, 1, 1, "", "DynEdgeJINST"]], "graphnet.models.gnn.dynedge_jinst.DynEdgeJINST": [[56, 3, 1, "", "forward"]], "graphnet.models.gnn.dynedge_kaggle_tito": [[57, 1, 1, "", "DynEdgeTITO"]], "graphnet.models.gnn.dynedge_kaggle_tito.DynEdgeTITO": [[57, 3, 1, "", "forward"]], "graphnet.models.gnn.gnn": [[58, 1, 1, "", "GNN"]], "graphnet.models.gnn.gnn.GNN": [[58, 3, 1, "", "forward"], [58, 4, 1, "", "nb_inputs"], [58, 4, 1, "", "nb_outputs"]], "graphnet.models.graphs": [[60, 0, 0, "-", "edges"], [62, 0, 0, "-", "graph_definition"], [63, 0, 0, "-", "graphs"], [64, 0, 0, "-", "nodes"]], "graphnet.models.graphs.edges": [[61, 0, 0, "-", "edges"]], "graphnet.models.graphs.edges.edges": [[61, 1, 1, "", "EdgeDefinition"], [61, 1, 1, "", "EuclideanEdges"], [61, 1, 1, "", "KNNEdges"], [61, 1, 1, "", "RadialEdges"]], "graphnet.models.graphs.edges.edges.EdgeDefinition": [[61, 3, 1, "", "forward"]], "graphnet.models.graphs.graph_definition": [[62, 1, 1, "", "GraphDefinition"]], "graphnet.models.graphs.graph_definition.GraphDefinition": [[62, 3, 1, "", "forward"]], "graphnet.models.graphs.graphs": [[63, 1, 1, "", "KNNGraph"]], "graphnet.models.graphs.nodes": [[65, 0, 0, "-", "nodes"]], "graphnet.models.graphs.nodes.nodes": [[65, 1, 1, "", "NodeDefinition"], [65, 1, 1, "", "NodesAsPulses"]], "graphnet.models.graphs.nodes.nodes.NodeDefinition": [[65, 3, 1, "", "forward"], [65, 4, 1, "", "nb_outputs"], [65, 3, 1, "", "set_number_of_inputs"]], "graphnet.models.model": [[66, 1, 1, "", "Model"]], "graphnet.models.model.Model": [[66, 3, 1, "", "fit"], [66, 3, 1, "", "forward"], [66, 3, 1, "", "from_config"], [66, 3, 1, "", "load"], [66, 3, 1, "", "load_state_dict"], [66, 3, 1, "", "predict"], [66, 3, 1, "", "predict_as_dataframe"], [66, 3, 1, "", "save"], [66, 3, 1, "", "save_state_dict"]], "graphnet.models.standard_model": [[67, 1, 1, "", "StandardModel"]], "graphnet.models.standard_model.StandardModel": [[67, 3, 1, "", "compute_loss"], [67, 3, 1, "", "configure_optimizers"], [67, 3, 1, "", "forward"], [67, 3, 1, "", "inference"], [67, 3, 1, "", "predict"], [67, 3, 1, "", "predict_as_dataframe"], [67, 4, 1, "", "prediction_labels"], [67, 3, 1, "", "shared_step"], [67, 4, 1, "", "target_labels"], [67, 3, 1, "", "train"], [67, 3, 1, "", "training_step"], [67, 3, 1, "", "validation_step"]], "graphnet.models.task": [[69, 0, 0, "-", "classification"], [70, 0, 0, "-", "reconstruction"], [71, 0, 0, "-", "task"]], "graphnet.models.task.classification": [[69, 1, 1, "", "BinaryClassificationTask"], [69, 1, 1, "", "BinaryClassificationTaskLogits"], [69, 1, 1, "", "MulticlassClassificationTask"]], "graphnet.models.task.classification.BinaryClassificationTask": [[69, 2, 1, "", "default_prediction_labels"], [69, 2, 1, "", "default_target_labels"], [69, 2, 1, "", "nb_inputs"]], "graphnet.models.task.classification.BinaryClassificationTaskLogits": [[69, 2, 1, "", "default_prediction_labels"], [69, 2, 1, "", "default_target_labels"], [69, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction": [[70, 1, 1, "", "AzimuthReconstruction"], [70, 1, 1, "", "AzimuthReconstructionWithKappa"], [70, 1, 1, "", "DirectionReconstructionWithKappa"], [70, 1, 1, "", "EnergyReconstruction"], [70, 1, 1, "", "EnergyReconstructionWithPower"], [70, 1, 1, "", "EnergyReconstructionWithUncertainty"], [70, 1, 1, "", "InelasticityReconstruction"], [70, 1, 1, "", "PositionReconstruction"], [70, 1, 1, "", "TimeReconstruction"], [70, 1, 1, "", "VertexReconstruction"], [70, 1, 1, "", "ZenithReconstruction"], [70, 
1, 1, "", "ZenithReconstructionWithKappa"]], "graphnet.models.task.reconstruction.AzimuthReconstruction": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.EnergyReconstruction": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.EnergyReconstructionWithPower": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.InelasticityReconstruction": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.PositionReconstruction": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.TimeReconstruction": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.VertexReconstruction": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.ZenithReconstruction": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa": [[70, 2, 1, "", "default_prediction_labels"], [70, 2, 1, "", "default_target_labels"], [70, 2, 1, "", "nb_inputs"]], "graphnet.models.task.task": [[71, 1, 1, "", "IdentityTask"], [71, 1, 1, "", "Task"]], "graphnet.models.task.task.IdentityTask": [[71, 4, 1, "", "default_prediction_labels"], [71, 4, 1, "", "default_target_labels"], [71, 4, 1, "", "nb_inputs"]], "graphnet.models.task.task.Task": [[71, 3, 1, "", "compute_loss"], [71, 4, 1, "", "default_prediction_labels"], [71, 4, 1, "", "default_target_labels"], [71, 3, 1, "", "forward"], [71, 3, 1, "", "inference"], [71, 4, 1, "", "nb_inputs"], [71, 3, 1, "", "train_eval"]], "graphnet.models.utils": [[72, 5, 1, "", "calculate_distance_matrix"], [72, 5, 1, "", "calculate_xyzt_homophily"], [72, 5, 1, "", "knn_graph_batch"]], "graphnet.pisa": [[74, 0, 0, "-", "fitting"], [75, 0, 0, "-", "plotting"]], "graphnet.pisa.fitting": [[74, 1, 1, "", "ContourFitter"], [74, 1, 1, "", "WeightFitter"], [74, 5, 1, "", "config_updater"]], "graphnet.pisa.fitting.ContourFitter": [[74, 3, 1, "", "fit_1d_contour"], [74, 3, 1, "", "fit_2d_contour"]], "graphnet.pisa.fitting.WeightFitter": [[74, 3, 1, "", "fit_weights"]], "graphnet.pisa.plotting": [[75, 5, 1, "", "plot_1D_contour"], [75, 5, 1, "", "plot_2D_contour"], [75, 5, 1, "", "read_entry"]], "graphnet.training": [[77, 0, 0, "-", "callbacks"], [78, 0, 0, "-", "labels"], [79, 0, 0, "-", "loss_functions"], [80, 0, 0, "-", "utils"], [81, 0, 
0, "-", "weight_fitting"]], "graphnet.training.callbacks": [[77, 1, 1, "", "PiecewiseLinearLR"], [77, 1, 1, "", "ProgressBar"]], "graphnet.training.callbacks.PiecewiseLinearLR": [[77, 3, 1, "", "get_lr"]], "graphnet.training.callbacks.ProgressBar": [[77, 3, 1, "", "get_metrics"], [77, 3, 1, "", "init_predict_tqdm"], [77, 3, 1, "", "init_test_tqdm"], [77, 3, 1, "", "init_train_tqdm"], [77, 3, 1, "", "init_validation_tqdm"], [77, 3, 1, "", "on_train_epoch_end"], [77, 3, 1, "", "on_train_epoch_start"]], "graphnet.training.labels": [[78, 1, 1, "", "Direction"], [78, 1, 1, "", "Label"]], "graphnet.training.labels.Label": [[78, 4, 1, "", "key"]], "graphnet.training.loss_functions": [[79, 1, 1, "", "BinaryCrossEntropyLoss"], [79, 1, 1, "", "CrossEntropyLoss"], [79, 1, 1, "", "EuclideanDistanceLoss"], [79, 1, 1, "", "LogCMK"], [79, 1, 1, "", "LogCoshLoss"], [79, 1, 1, "", "LossFunction"], [79, 1, 1, "", "MSELoss"], [79, 1, 1, "", "RMSELoss"], [79, 1, 1, "", "VonMisesFisher2DLoss"], [79, 1, 1, "", "VonMisesFisher3DLoss"], [79, 1, 1, "", "VonMisesFisherLoss"]], "graphnet.training.loss_functions.LogCMK": [[79, 3, 1, "", "backward"], [79, 3, 1, "", "forward"]], "graphnet.training.loss_functions.LossFunction": [[79, 3, 1, "", "forward"]], "graphnet.training.loss_functions.VonMisesFisherLoss": [[79, 3, 1, "", "log_cmk"], [79, 3, 1, "", "log_cmk_approx"], [79, 3, 1, "", "log_cmk_exact"]], "graphnet.training.utils": [[80, 5, 1, "", "collate_fn"], [80, 5, 1, "", "get_predictions"], [80, 5, 1, "", "make_dataloader"], [80, 5, 1, "", "make_train_validation_dataloader"], [80, 5, 1, "", "save_results"]], "graphnet.training.weight_fitting": [[81, 1, 1, "", "BjoernLow"], [81, 1, 1, "", "Uniform"], [81, 1, 1, "", "WeightFitter"]], "graphnet.training.weight_fitting.WeightFitter": [[81, 3, 1, "", "fit"]], "graphnet.utilities": [[83, 0, 0, "-", "argparse"], [84, 0, 0, "-", "config"], [91, 0, 0, "-", "decorators"], [92, 0, 0, "-", "filesys"], [93, 0, 0, "-", "imports"], [94, 0, 0, "-", "logging"], [95, 0, 0, "-", "maths"]], "graphnet.utilities.argparse": [[83, 1, 1, "", "ArgumentParser"], [83, 1, 1, "", "Options"]], "graphnet.utilities.argparse.ArgumentParser": [[83, 2, 1, "", "standard_arguments"], [83, 3, 1, "", "with_standard_arguments"]], "graphnet.utilities.argparse.Options": [[83, 3, 1, "", "contains"], [83, 3, 1, "", "pop_default"]], "graphnet.utilities.config": [[85, 0, 0, "-", "base_config"], [86, 0, 0, "-", "configurable"], [87, 0, 0, "-", "dataset_config"], [88, 0, 0, "-", "model_config"], [89, 0, 0, "-", "parsing"], [90, 0, 0, "-", "training_config"]], "graphnet.utilities.config.base_config": [[85, 1, 1, "", "BaseConfig"], [85, 5, 1, "", "get_all_argument_values"]], "graphnet.utilities.config.base_config.BaseConfig": [[85, 3, 1, "", "as_dict"], [85, 3, 1, "", "dump"], [85, 3, 1, "", "load"], [85, 2, 1, "", "model_config"], [85, 2, 1, "", "model_fields"]], "graphnet.utilities.config.configurable": [[86, 1, 1, "", "Configurable"]], "graphnet.utilities.config.configurable.Configurable": [[86, 4, 1, "", "config"], [86, 3, 1, "", "from_config"], [86, 3, 1, "", "save_config"]], "graphnet.utilities.config.dataset_config": [[87, 1, 1, "", "DatasetConfig"], [87, 1, 1, "", "DatasetConfigSaverABCMeta"], [87, 1, 1, "", "DatasetConfigSaverMeta"], [87, 5, 1, "", "save_dataset_config"]], "graphnet.utilities.config.dataset_config.DatasetConfig": [[87, 3, 1, "", "as_dict"], [87, 2, 1, "", "features"], [87, 2, 1, "", "graph_definition"], [87, 2, 1, "", "index_column"], [87, 2, 1, "", "loss_weight_column"], [87, 2, 1, "", 
"loss_weight_default_value"], [87, 2, 1, "", "loss_weight_table"], [87, 2, 1, "", "model_config"], [87, 2, 1, "", "model_fields"], [87, 2, 1, "", "node_truth"], [87, 2, 1, "", "node_truth_table"], [87, 2, 1, "", "path"], [87, 2, 1, "", "pulsemaps"], [87, 2, 1, "", "seed"], [87, 2, 1, "", "selection"], [87, 2, 1, "", "string_selection"], [87, 2, 1, "", "truth"], [87, 2, 1, "", "truth_table"]], "graphnet.utilities.config.model_config": [[88, 1, 1, "", "ModelConfig"], [88, 1, 1, "", "ModelConfigSaverABC"], [88, 1, 1, "", "ModelConfigSaverMeta"], [88, 5, 1, "", "save_model_config"]], "graphnet.utilities.config.model_config.ModelConfig": [[88, 2, 1, "", "arguments"], [88, 3, 1, "", "as_dict"], [88, 2, 1, "", "class_name"], [88, 2, 1, "", "model_config"], [88, 2, 1, "", "model_fields"]], "graphnet.utilities.config.parsing": [[89, 5, 1, "", "get_all_grapnet_classes"], [89, 5, 1, "", "get_graphnet_classes"], [89, 5, 1, "", "is_graphnet_class"], [89, 5, 1, "", "is_graphnet_module"], [89, 5, 1, "", "list_all_submodules"], [89, 5, 1, "", "traverse_and_apply"]], "graphnet.utilities.config.training_config": [[90, 1, 1, "", "TrainingConfig"]], "graphnet.utilities.config.training_config.TrainingConfig": [[90, 2, 1, "", "dataloader"], [90, 2, 1, "", "early_stopping_patience"], [90, 2, 1, "", "fit"], [90, 2, 1, "", "model_config"], [90, 2, 1, "", "model_fields"], [90, 2, 1, "", "target"]], "graphnet.utilities.filesys": [[92, 5, 1, "", "find_i3_files"], [92, 5, 1, "", "has_extension"], [92, 5, 1, "", "is_gcd_file"], [92, 5, 1, "", "is_i3_file"]], "graphnet.utilities.imports": [[93, 5, 1, "", "has_icecube_package"], [93, 5, 1, "", "has_pisa_package"], [93, 5, 1, "", "has_torch_package"], [93, 5, 1, "", "requires_icecube"]], "graphnet.utilities.logging": [[94, 1, 1, "", "Logger"], [94, 1, 1, "", "RepeatFilter"]], "graphnet.utilities.logging.Logger": [[94, 3, 1, "", "critical"], [94, 3, 1, "", "debug"], [94, 3, 1, "", "error"], [94, 4, 1, "", "file_handlers"], [94, 4, 1, "", "handlers"], [94, 3, 1, "", "info"], [94, 3, 1, "", "setLevel"], [94, 4, 1, "", "stream_handlers"], [94, 3, 1, "", "warning"], [94, 3, 1, "", "warning_once"]], "graphnet.utilities.logging.RepeatFilter": [[94, 3, 1, "", "filter"], [94, 2, 1, "", "nb_repeats_allowed"]], "graphnet.utilities.maths": [[95, 5, 1, "", "eps_like"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:attribute", "3": "py:method", "4": "py:property", "5": "py:function", "6": "py:exception"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "attribute", "Python attribute"], "3": ["py", "method", "Python method"], "4": ["py", "property", "Python property"], "5": ["py", "function", "Python function"], "6": ["py", "exception", "Python exception"]}, "titleterms": {"about": [0, 98], "impact": [0, 98], "usag": [0, 98], "acknowledg": [0, 98], "api": 1, "constant": [2, 4], "data": 3, "dataconvert": 5, "dataload": 6, "dataset": [7, 8], "parquet": [9, 30], "parquet_dataset": 10, "sqlite": [11, 33], "sqlite_dataset": 12, "extractor": 13, "i3extractor": 14, "i3featureextractor": 15, "i3genericextractor": 16, "i3hybridrecoextractor": 17, "i3ntmuonlabelsextractor": 18, "i3particleextractor": 19, "i3pisaextractor": 20, "i3quesoextractor": 21, "i3retroextractor": 22, "i3splinempeextractor": 23, "i3truthextractor": 24, "i3tumextractor": 25, "util": [26, 36, 72, 80, 82], "collect": 27, "frame": 28, "type": 29, "parquet_dataconvert": 31, "pipelin": 32, "sqlite_dataconvert": 34, "sqlite_util": 35, "parquet_to_sqlit": 37, 
"random": 38, "string_selection_resolv": 39, "deploy": [40, 42], "i3modul": 41, "graphnet_modul": 43, "model": [44, 66], "coarsen": 45, "compon": 46, "layer": 47, "pool": 48, "detector": [49, 50], "icecub": 51, "prometheu": 52, "gnn": [53, 58], "convnet": 54, "dynedg": 55, "dynedge_jinst": 56, "dynedge_kaggle_tito": 57, "graph": [59, 63], "edg": [60, 61], "graph_definit": 62, "node": [64, 65], "standard_model": 67, "task": [68, 71], "classif": 69, "reconstruct": 70, "pisa": 73, "fit": 74, "plot": 75, "train": 76, "callback": 77, "label": 78, "loss_funct": 79, "weight_fit": 81, "argpars": 83, "config": 84, "base_config": 85, "configur": 86, "dataset_config": 87, "model_config": 88, "pars": 89, "training_config": 90, "decor": 91, "filesi": 92, "import": 93, "log": 94, "math": 95, "src": 96, "contribut": 97, "github": 97, "issu": 97, "pull": 97, "request": 97, "convent": 97, "code": 97, "qualiti": 97, "instal": 99, "icetrai": 99, "stand": 99, "alon": 99, "run": 99, "docker": 99}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 60}, "alltitles": {"About": [[0, "about"], [98, "about"]], "Impact": [[0, "impact"], [98, "impact"]], "Usage": [[0, "usage"], [98, "usage"]], "Acknowledgements": [[0, "acknowledgements"], [98, "acknowledgements"]], "API": [[1, "module-graphnet"]], "constants": [[2, "module-graphnet.constants"], [4, "module-graphnet.data.constants"]], "data": [[3, "module-graphnet.data"]], "dataconverter": [[5, "module-graphnet.data.dataconverter"]], "dataloader": [[6, "module-graphnet.data.dataloader"]], "dataset": [[7, "module-graphnet.data.dataset"], [8, "module-graphnet.data.dataset.dataset"]], "parquet": [[9, "module-graphnet.data.dataset.parquet"], [30, "module-graphnet.data.parquet"]], "parquet_dataset": [[10, "module-graphnet.data.dataset.parquet.parquet_dataset"]], "sqlite": [[11, "module-graphnet.data.dataset.sqlite"], [33, "module-graphnet.data.sqlite"]], "sqlite_dataset": [[12, "module-graphnet.data.dataset.sqlite.sqlite_dataset"]], "extractors": [[13, "module-graphnet.data.extractors"]], "i3extractor": [[14, "module-graphnet.data.extractors.i3extractor"]], "i3featureextractor": [[15, "module-graphnet.data.extractors.i3featureextractor"]], "i3genericextractor": [[16, "module-graphnet.data.extractors.i3genericextractor"]], "i3hybridrecoextractor": [[17, "module-graphnet.data.extractors.i3hybridrecoextractor"]], "i3ntmuonlabelsextractor": [[18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"]], "i3particleextractor": [[19, "module-graphnet.data.extractors.i3particleextractor"]], "i3pisaextractor": [[20, "module-graphnet.data.extractors.i3pisaextractor"]], "i3quesoextractor": [[21, "module-graphnet.data.extractors.i3quesoextractor"]], "i3retroextractor": [[22, "module-graphnet.data.extractors.i3retroextractor"]], "i3splinempeextractor": [[23, "module-graphnet.data.extractors.i3splinempeextractor"]], "i3truthextractor": [[24, "module-graphnet.data.extractors.i3truthextractor"]], "i3tumextractor": [[25, "module-graphnet.data.extractors.i3tumextractor"]], "utilities": [[26, "module-graphnet.data.extractors.utilities"], [36, "module-graphnet.data.utilities"], [82, "module-graphnet.utilities"]], "collections": [[27, 
"module-graphnet.data.extractors.utilities.collections"]], "frames": [[28, "module-graphnet.data.extractors.utilities.frames"]], "types": [[29, "module-graphnet.data.extractors.utilities.types"]], "parquet_dataconverter": [[31, "module-graphnet.data.parquet.parquet_dataconverter"]], "pipeline": [[32, "module-graphnet.data.pipeline"]], "sqlite_dataconverter": [[34, "module-graphnet.data.sqlite.sqlite_dataconverter"]], "sqlite_utilities": [[35, "module-graphnet.data.sqlite.sqlite_utilities"]], "parquet_to_sqlite": [[37, "module-graphnet.data.utilities.parquet_to_sqlite"]], "random": [[38, "module-graphnet.data.utilities.random"]], "string_selection_resolver": [[39, "module-graphnet.data.utilities.string_selection_resolver"]], "deployment": [[40, "module-graphnet.deployment"]], "i3modules": [[41, "i3modules"]], "deployer": [[42, "deployer"]], "graphnet_module": [[43, "module-graphnet.deployment.i3modules.graphnet_module"]], "models": [[44, "module-graphnet.models"]], "coarsening": [[45, "module-graphnet.models.coarsening"]], "components": [[46, "module-graphnet.models.components"]], "layers": [[47, "module-graphnet.models.components.layers"]], "pool": [[48, "module-graphnet.models.components.pool"]], "detector": [[49, "module-graphnet.models.detector"], [50, "module-graphnet.models.detector.detector"]], "icecube": [[51, "module-graphnet.models.detector.icecube"]], "prometheus": [[52, "module-graphnet.models.detector.prometheus"]], "gnn": [[53, "module-graphnet.models.gnn"], [58, "module-graphnet.models.gnn.gnn"]], "convnet": [[54, "module-graphnet.models.gnn.convnet"]], "dynedge": [[55, "module-graphnet.models.gnn.dynedge"]], "dynedge_jinst": [[56, "module-graphnet.models.gnn.dynedge_jinst"]], "dynedge_kaggle_tito": [[57, "module-graphnet.models.gnn.dynedge_kaggle_tito"]], "graphs": [[59, "module-graphnet.models.graphs"], [63, "module-graphnet.models.graphs.graphs"]], "edges": [[60, "module-graphnet.models.graphs.edges"], [61, "module-graphnet.models.graphs.edges.edges"]], "graph_definition": [[62, "module-graphnet.models.graphs.graph_definition"]], "nodes": [[64, "module-graphnet.models.graphs.nodes"], [65, "module-graphnet.models.graphs.nodes.nodes"]], "model": [[66, "module-graphnet.models.model"]], "standard_model": [[67, "module-graphnet.models.standard_model"]], "task": [[68, "module-graphnet.models.task"], [71, "module-graphnet.models.task.task"]], "classification": [[69, "module-graphnet.models.task.classification"]], "reconstruction": [[70, "module-graphnet.models.task.reconstruction"]], "utils": [[72, "module-graphnet.models.utils"], [80, "module-graphnet.training.utils"]], "pisa": [[73, "module-graphnet.pisa"]], "fitting": [[74, "module-graphnet.pisa.fitting"]], "plotting": [[75, "module-graphnet.pisa.plotting"]], "training": [[76, "module-graphnet.training"]], "callbacks": [[77, "module-graphnet.training.callbacks"]], "labels": [[78, "module-graphnet.training.labels"]], "loss_functions": [[79, "module-graphnet.training.loss_functions"]], "weight_fitting": [[81, "module-graphnet.training.weight_fitting"]], "argparse": [[83, "module-graphnet.utilities.argparse"]], "config": [[84, "module-graphnet.utilities.config"]], "base_config": [[85, "module-graphnet.utilities.config.base_config"]], "configurable": [[86, "module-graphnet.utilities.config.configurable"]], "dataset_config": [[87, "module-graphnet.utilities.config.dataset_config"]], "model_config": [[88, "module-graphnet.utilities.config.model_config"]], "parsing": [[89, "module-graphnet.utilities.config.parsing"]], 
"training_config": [[90, "module-graphnet.utilities.config.training_config"]], "decorators": [[91, "module-graphnet.utilities.decorators"]], "filesys": [[92, "module-graphnet.utilities.filesys"]], "imports": [[93, "module-graphnet.utilities.imports"]], "logging": [[94, "module-graphnet.utilities.logging"]], "maths": [[95, "module-graphnet.utilities.maths"]], "src": [[96, "src"]], "Contribute": [[97, "contribute"]], "GitHub issues": [[97, "github-issues"]], "Pull requests": [[97, "pull-requests"]], "Conventions": [[97, "conventions"]], "Code quality": [[97, "code-quality"]], "Install": [[99, "install"]], "Installing with IceTray": [[99, "installing-with-icetray"]], "Installing stand-alone": [[99, "installing-stand-alone"]], "Running in Docker": [[99, "running-in-docker"]]}, "indexentries": {"graphnet": [[1, "module-graphnet"]], "module": [[1, "module-graphnet"], [2, "module-graphnet.constants"], [3, "module-graphnet.data"], [4, "module-graphnet.data.constants"], [5, "module-graphnet.data.dataconverter"], [6, "module-graphnet.data.dataloader"], [7, "module-graphnet.data.dataset"], [8, "module-graphnet.data.dataset.dataset"], [9, "module-graphnet.data.dataset.parquet"], [10, "module-graphnet.data.dataset.parquet.parquet_dataset"], [11, "module-graphnet.data.dataset.sqlite"], [12, "module-graphnet.data.dataset.sqlite.sqlite_dataset"], [13, "module-graphnet.data.extractors"], [14, "module-graphnet.data.extractors.i3extractor"], [15, "module-graphnet.data.extractors.i3featureextractor"], [16, "module-graphnet.data.extractors.i3genericextractor"], [17, "module-graphnet.data.extractors.i3hybridrecoextractor"], [18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"], [19, "module-graphnet.data.extractors.i3particleextractor"], [20, "module-graphnet.data.extractors.i3pisaextractor"], [21, "module-graphnet.data.extractors.i3quesoextractor"], [22, "module-graphnet.data.extractors.i3retroextractor"], [23, "module-graphnet.data.extractors.i3splinempeextractor"], [24, "module-graphnet.data.extractors.i3truthextractor"], [25, "module-graphnet.data.extractors.i3tumextractor"], [26, "module-graphnet.data.extractors.utilities"], [27, "module-graphnet.data.extractors.utilities.collections"], [28, "module-graphnet.data.extractors.utilities.frames"], [29, "module-graphnet.data.extractors.utilities.types"], [30, "module-graphnet.data.parquet"], [31, "module-graphnet.data.parquet.parquet_dataconverter"], [32, "module-graphnet.data.pipeline"], [33, "module-graphnet.data.sqlite"], [34, "module-graphnet.data.sqlite.sqlite_dataconverter"], [35, "module-graphnet.data.sqlite.sqlite_utilities"], [36, "module-graphnet.data.utilities"], [37, "module-graphnet.data.utilities.parquet_to_sqlite"], [38, "module-graphnet.data.utilities.random"], [39, "module-graphnet.data.utilities.string_selection_resolver"], [40, "module-graphnet.deployment"], [43, "module-graphnet.deployment.i3modules.graphnet_module"], [44, "module-graphnet.models"], [45, "module-graphnet.models.coarsening"], [46, "module-graphnet.models.components"], [47, "module-graphnet.models.components.layers"], [48, "module-graphnet.models.components.pool"], [49, "module-graphnet.models.detector"], [50, "module-graphnet.models.detector.detector"], [51, "module-graphnet.models.detector.icecube"], [52, "module-graphnet.models.detector.prometheus"], [53, "module-graphnet.models.gnn"], [54, "module-graphnet.models.gnn.convnet"], [55, "module-graphnet.models.gnn.dynedge"], [56, "module-graphnet.models.gnn.dynedge_jinst"], [57, 
"module-graphnet.models.gnn.dynedge_kaggle_tito"], [58, "module-graphnet.models.gnn.gnn"], [59, "module-graphnet.models.graphs"], [60, "module-graphnet.models.graphs.edges"], [61, "module-graphnet.models.graphs.edges.edges"], [62, "module-graphnet.models.graphs.graph_definition"], [63, "module-graphnet.models.graphs.graphs"], [64, "module-graphnet.models.graphs.nodes"], [65, "module-graphnet.models.graphs.nodes.nodes"], [66, "module-graphnet.models.model"], [67, "module-graphnet.models.standard_model"], [68, "module-graphnet.models.task"], [69, "module-graphnet.models.task.classification"], [70, "module-graphnet.models.task.reconstruction"], [71, "module-graphnet.models.task.task"], [72, "module-graphnet.models.utils"], [73, "module-graphnet.pisa"], [74, "module-graphnet.pisa.fitting"], [75, "module-graphnet.pisa.plotting"], [76, "module-graphnet.training"], [77, "module-graphnet.training.callbacks"], [78, "module-graphnet.training.labels"], [79, "module-graphnet.training.loss_functions"], [80, "module-graphnet.training.utils"], [81, "module-graphnet.training.weight_fitting"], [82, "module-graphnet.utilities"], [83, "module-graphnet.utilities.argparse"], [84, "module-graphnet.utilities.config"], [85, "module-graphnet.utilities.config.base_config"], [86, "module-graphnet.utilities.config.configurable"], [87, "module-graphnet.utilities.config.dataset_config"], [88, "module-graphnet.utilities.config.model_config"], [89, "module-graphnet.utilities.config.parsing"], [90, "module-graphnet.utilities.config.training_config"], [91, "module-graphnet.utilities.decorators"], [92, "module-graphnet.utilities.filesys"], [93, "module-graphnet.utilities.imports"], [94, "module-graphnet.utilities.logging"], [95, "module-graphnet.utilities.maths"]], "graphnet.constants": [[2, "module-graphnet.constants"]], "graphnet.data": [[3, "module-graphnet.data"]], "deepcore (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.DEEPCORE"]], "deepcore (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.DEEPCORE"]], "features (class in graphnet.data.constants)": [[4, "graphnet.data.constants.FEATURES"]], "icecube86 (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.ICECUBE86"]], "icecube86 (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.ICECUBE86"]], "kaggle (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.KAGGLE"]], "kaggle (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.KAGGLE"]], "prometheus (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.PROMETHEUS"]], "prometheus (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.PROMETHEUS"]], "truth (class in graphnet.data.constants)": [[4, "graphnet.data.constants.TRUTH"]], "upgrade (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.UPGRADE"]], "upgrade (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.UPGRADE"]], "graphnet.data.constants": [[4, "module-graphnet.data.constants"]], "dataconverter (class in graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.DataConverter"]], "fileset (class in graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.FileSet"]], "cache_output_files() (in module graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.cache_output_files"]], "execute() (graphnet.data.dataconverter.dataconverter 
method)": [[5, "graphnet.data.dataconverter.DataConverter.execute"]], "file_suffix (graphnet.data.dataconverter.dataconverter property)": [[5, "graphnet.data.dataconverter.DataConverter.file_suffix"]], "gcd_file (graphnet.data.dataconverter.fileset attribute)": [[5, "graphnet.data.dataconverter.FileSet.gcd_file"]], "get_map_function() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.get_map_function"]], "graphnet.data.dataconverter": [[5, "module-graphnet.data.dataconverter"]], "i3_file (graphnet.data.dataconverter.fileset attribute)": [[5, "graphnet.data.dataconverter.FileSet.i3_file"]], "init_global_index() (in module graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.init_global_index"]], "merge_files() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.merge_files"]], "save_data() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.save_data"]], "dataloader (class in graphnet.data.dataloader)": [[6, "graphnet.data.dataloader.DataLoader"]], "collate_fn() (in module graphnet.data.dataloader)": [[6, "graphnet.data.dataloader.collate_fn"]], "do_shuffle() (in module graphnet.data.dataloader)": [[6, "graphnet.data.dataloader.do_shuffle"]], "from_dataset_config() (graphnet.data.dataloader.dataloader class method)": [[6, "graphnet.data.dataloader.DataLoader.from_dataset_config"]], "graphnet.data.dataloader": [[6, "module-graphnet.data.dataloader"]], "graphnet.data.dataset": [[7, "module-graphnet.data.dataset"]], "columnmissingexception": [[8, "graphnet.data.dataset.dataset.ColumnMissingException"]], "dataset (class in graphnet.data.dataset.dataset)": [[8, "graphnet.data.dataset.dataset.Dataset"]], "ensembledataset (class in graphnet.data.dataset.dataset)": [[8, "graphnet.data.dataset.dataset.EnsembleDataset"]], "add_label() (graphnet.data.dataset.dataset.dataset method)": [[8, "graphnet.data.dataset.dataset.Dataset.add_label"]], "concatenate() (graphnet.data.dataset.dataset.dataset class method)": [[8, "graphnet.data.dataset.dataset.Dataset.concatenate"]], "from_config() (graphnet.data.dataset.dataset.dataset class method)": [[8, "graphnet.data.dataset.dataset.Dataset.from_config"]], "graphnet.data.dataset.dataset": [[8, "module-graphnet.data.dataset.dataset"]], "load_module() (in module graphnet.data.dataset.dataset)": [[8, "graphnet.data.dataset.dataset.load_module"]], "parse_graph_definition() (in module graphnet.data.dataset.dataset)": [[8, "graphnet.data.dataset.dataset.parse_graph_definition"]], "path (graphnet.data.dataset.dataset.dataset property)": [[8, "graphnet.data.dataset.dataset.Dataset.path"]], "query_table() (graphnet.data.dataset.dataset.dataset method)": [[8, "graphnet.data.dataset.dataset.Dataset.query_table"]], "truth_table (graphnet.data.dataset.dataset.dataset property)": [[8, "graphnet.data.dataset.dataset.Dataset.truth_table"]], "graphnet.data.dataset.parquet": [[9, "module-graphnet.data.dataset.parquet"]], "parquetdataset (class in graphnet.data.dataset.parquet.parquet_dataset)": [[10, "graphnet.data.dataset.parquet.parquet_dataset.ParquetDataset"]], "graphnet.data.dataset.parquet.parquet_dataset": [[10, "module-graphnet.data.dataset.parquet.parquet_dataset"]], "query_table() (graphnet.data.dataset.parquet.parquet_dataset.parquetdataset method)": [[10, "graphnet.data.dataset.parquet.parquet_dataset.ParquetDataset.query_table"]], "graphnet.data.dataset.sqlite": [[11, "module-graphnet.data.dataset.sqlite"]], 
"sqlitedataset (class in graphnet.data.dataset.sqlite.sqlite_dataset)": [[12, "graphnet.data.dataset.sqlite.sqlite_dataset.SQLiteDataset"]], "graphnet.data.dataset.sqlite.sqlite_dataset": [[12, "module-graphnet.data.dataset.sqlite.sqlite_dataset"]], "query_table() (graphnet.data.dataset.sqlite.sqlite_dataset.sqlitedataset method)": [[12, "graphnet.data.dataset.sqlite.sqlite_dataset.SQLiteDataset.query_table"]], "graphnet.data.extractors": [[13, "module-graphnet.data.extractors"]], "i3extractor (class in graphnet.data.extractors.i3extractor)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor"]], "i3extractorcollection (class in graphnet.data.extractors.i3extractor)": [[14, "graphnet.data.extractors.i3extractor.I3ExtractorCollection"]], "graphnet.data.extractors.i3extractor": [[14, "module-graphnet.data.extractors.i3extractor"]], "name (graphnet.data.extractors.i3extractor.i3extractor property)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor.name"]], "set_files() (graphnet.data.extractors.i3extractor.i3extractor method)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor.set_files"]], "set_files() (graphnet.data.extractors.i3extractor.i3extractorcollection method)": [[14, "graphnet.data.extractors.i3extractor.I3ExtractorCollection.set_files"]], "i3featureextractor (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractor"]], "i3featureextractoricecube86 (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCube86"]], "i3featureextractoricecubedeepcore (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCubeDeepCore"]], "i3featureextractoricecubeupgrade (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCubeUpgrade"]], "i3pulsenoisetruthflagicecubeupgrade (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3PulseNoiseTruthFlagIceCubeUpgrade"]], "graphnet.data.extractors.i3featureextractor": [[15, "module-graphnet.data.extractors.i3featureextractor"]], "i3genericextractor (class in graphnet.data.extractors.i3genericextractor)": [[16, "graphnet.data.extractors.i3genericextractor.I3GenericExtractor"]], "graphnet.data.extractors.i3genericextractor": [[16, "module-graphnet.data.extractors.i3genericextractor"]], "i3galacticplanehybridrecoextractor (class in graphnet.data.extractors.i3hybridrecoextractor)": [[17, "graphnet.data.extractors.i3hybridrecoextractor.I3GalacticPlaneHybridRecoExtractor"]], "graphnet.data.extractors.i3hybridrecoextractor": [[17, "module-graphnet.data.extractors.i3hybridrecoextractor"]], "i3ntmuonlabelextractor (class in graphnet.data.extractors.i3ntmuonlabelsextractor)": [[18, "graphnet.data.extractors.i3ntmuonlabelsextractor.I3NTMuonLabelExtractor"]], "graphnet.data.extractors.i3ntmuonlabelsextractor": [[18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"]], "i3particleextractor (class in graphnet.data.extractors.i3particleextractor)": [[19, "graphnet.data.extractors.i3particleextractor.I3ParticleExtractor"]], "graphnet.data.extractors.i3particleextractor": [[19, "module-graphnet.data.extractors.i3particleextractor"]], "i3pisaextractor (class in graphnet.data.extractors.i3pisaextractor)": [[20, "graphnet.data.extractors.i3pisaextractor.I3PISAExtractor"]], 
"graphnet.data.extractors.i3pisaextractor": [[20, "module-graphnet.data.extractors.i3pisaextractor"]], "i3quesoextractor (class in graphnet.data.extractors.i3quesoextractor)": [[21, "graphnet.data.extractors.i3quesoextractor.I3QUESOExtractor"]], "graphnet.data.extractors.i3quesoextractor": [[21, "module-graphnet.data.extractors.i3quesoextractor"]], "i3retroextractor (class in graphnet.data.extractors.i3retroextractor)": [[22, "graphnet.data.extractors.i3retroextractor.I3RetroExtractor"]], "graphnet.data.extractors.i3retroextractor": [[22, "module-graphnet.data.extractors.i3retroextractor"]], "i3splinempeicextractor (class in graphnet.data.extractors.i3splinempeextractor)": [[23, "graphnet.data.extractors.i3splinempeextractor.I3SplineMPEICExtractor"]], "graphnet.data.extractors.i3splinempeextractor": [[23, "module-graphnet.data.extractors.i3splinempeextractor"]], "i3truthextractor (class in graphnet.data.extractors.i3truthextractor)": [[24, "graphnet.data.extractors.i3truthextractor.I3TruthExtractor"]], "graphnet.data.extractors.i3truthextractor": [[24, "module-graphnet.data.extractors.i3truthextractor"]], "i3tumextractor (class in graphnet.data.extractors.i3tumextractor)": [[25, "graphnet.data.extractors.i3tumextractor.I3TUMExtractor"]], "graphnet.data.extractors.i3tumextractor": [[25, "module-graphnet.data.extractors.i3tumextractor"]], "graphnet.data.extractors.utilities": [[26, "module-graphnet.data.extractors.utilities"]], "flatten_nested_dictionary() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.flatten_nested_dictionary"]], "graphnet.data.extractors.utilities.collections": [[27, "module-graphnet.data.extractors.utilities.collections"]], "serialise() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.serialise"]], "transpose_list_of_dicts() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.transpose_list_of_dicts"]], "frame_is_montecarlo() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.frame_is_montecarlo"]], "frame_is_noise() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.frame_is_noise"]], "get_om_keys_and_pulseseries() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.get_om_keys_and_pulseseries"]], "graphnet.data.extractors.utilities.frames": [[28, "module-graphnet.data.extractors.utilities.frames"]], "break_cyclic_recursion() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.break_cyclic_recursion"]], "cast_object_to_pure_python() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.cast_object_to_pure_python"]], "cast_pulse_series_to_pure_python() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.cast_pulse_series_to_pure_python"]], "get_member_variables() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.get_member_variables"]], "graphnet.data.extractors.utilities.types": [[29, "module-graphnet.data.extractors.utilities.types"]], "is_boost_class() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_boost_class"]], "is_boost_enum() (in module 
graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_boost_enum"]], "is_icecube_class() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_icecube_class"]], "is_method() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_method"]], "is_type() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_type"]], "graphnet.data.parquet": [[30, "module-graphnet.data.parquet"]], "parquetdataconverter (class in graphnet.data.parquet.parquet_dataconverter)": [[31, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter"]], "file_suffix (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter attribute)": [[31, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.file_suffix"]], "graphnet.data.parquet.parquet_dataconverter": [[31, "module-graphnet.data.parquet.parquet_dataconverter"]], "merge_files() (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter method)": [[31, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.merge_files"]], "save_data() (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter method)": [[31, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.save_data"]], "insqlitepipeline (class in graphnet.data.pipeline)": [[32, "graphnet.data.pipeline.InSQLitePipeline"]], "graphnet.data.pipeline": [[32, "module-graphnet.data.pipeline"]], "graphnet.data.sqlite": [[33, "module-graphnet.data.sqlite"]], "sqlitedataconverter (class in graphnet.data.sqlite.sqlite_dataconverter)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter"]], "any_pulsemap_is_non_empty() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.any_pulsemap_is_non_empty"]], "construct_dataframe() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.construct_dataframe"]], "file_suffix (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter attribute)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.file_suffix"]], "graphnet.data.sqlite.sqlite_dataconverter": [[34, "module-graphnet.data.sqlite.sqlite_dataconverter"]], "is_mc_tree() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.is_mc_tree"]], "is_pulse_map() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.is_pulse_map"]], "merge_files() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.merge_files"]], "save_data() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.save_data"]], "attach_index() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.attach_index"]], "create_table() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.create_table"]], "create_table_and_save_to_sql() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.create_table_and_save_to_sql"]], "database_exists() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, 
"graphnet.data.sqlite.sqlite_utilities.database_exists"]], "database_table_exists() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.database_table_exists"]], "graphnet.data.sqlite.sqlite_utilities": [[35, "module-graphnet.data.sqlite.sqlite_utilities"]], "run_sql_code() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.run_sql_code"]], "save_to_sql() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.save_to_sql"]], "graphnet.data.utilities": [[36, "module-graphnet.data.utilities"]], "parquettosqliteconverter (class in graphnet.data.utilities.parquet_to_sqlite)": [[37, "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter"]], "graphnet.data.utilities.parquet_to_sqlite": [[37, "module-graphnet.data.utilities.parquet_to_sqlite"]], "run() (graphnet.data.utilities.parquet_to_sqlite.parquettosqliteconverter method)": [[37, "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter.run"]], "graphnet.data.utilities.random": [[38, "module-graphnet.data.utilities.random"]], "pairwise_shuffle() (in module graphnet.data.utilities.random)": [[38, "graphnet.data.utilities.random.pairwise_shuffle"]], "stringselectionresolver (class in graphnet.data.utilities.string_selection_resolver)": [[39, "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver"]], "graphnet.data.utilities.string_selection_resolver": [[39, "module-graphnet.data.utilities.string_selection_resolver"]], "resolve() (graphnet.data.utilities.string_selection_resolver.stringselectionresolver method)": [[39, "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver.resolve"]], "graphnet.deployment": [[40, "module-graphnet.deployment"]], "graphneti3module (class in graphnet.deployment.i3modules.graphnet_module)": [[43, "graphnet.deployment.i3modules.graphnet_module.GraphNeTI3Module"]], "i3inferencemodule (class in graphnet.deployment.i3modules.graphnet_module)": [[43, "graphnet.deployment.i3modules.graphnet_module.I3InferenceModule"]], "i3pulsecleanermodule (class in graphnet.deployment.i3modules.graphnet_module)": [[43, "graphnet.deployment.i3modules.graphnet_module.I3PulseCleanerModule"]], "graphnet.deployment.i3modules.graphnet_module": [[43, "module-graphnet.deployment.i3modules.graphnet_module"]], "graphnet.models": [[44, "module-graphnet.models"]], "attributecoarsening (class in graphnet.models.coarsening)": [[45, "graphnet.models.coarsening.AttributeCoarsening"]], "coarsening (class in graphnet.models.coarsening)": [[45, "graphnet.models.coarsening.Coarsening"]], "customdomcoarsening (class in graphnet.models.coarsening)": [[45, "graphnet.models.coarsening.CustomDOMCoarsening"]], "domandtimewindowcoarsening (class in graphnet.models.coarsening)": [[45, "graphnet.models.coarsening.DOMAndTimeWindowCoarsening"]], "domcoarsening (class in graphnet.models.coarsening)": [[45, "graphnet.models.coarsening.DOMCoarsening"]], "forward() (graphnet.models.coarsening.coarsening method)": [[45, "graphnet.models.coarsening.Coarsening.forward"]], "graphnet.models.coarsening": [[45, "module-graphnet.models.coarsening"]], "reduce_options (graphnet.models.coarsening.coarsening attribute)": [[45, "graphnet.models.coarsening.Coarsening.reduce_options"]], "unbatch_edge_index() (in module graphnet.models.coarsening)": [[45, "graphnet.models.coarsening.unbatch_edge_index"]], "graphnet.models.components": [[46, "module-graphnet.models.components"]], "dynedgeconv 
(class in graphnet.models.components.layers)": [[47, "graphnet.models.components.layers.DynEdgeConv"]], "dyntrans (class in graphnet.models.components.layers)": [[47, "graphnet.models.components.layers.DynTrans"]], "edgeconvtito (class in graphnet.models.components.layers)": [[47, "graphnet.models.components.layers.EdgeConvTito"]], "forward() (graphnet.models.components.layers.dynedgeconv method)": [[47, "graphnet.models.components.layers.DynEdgeConv.forward"]], "forward() (graphnet.models.components.layers.dyntrans method)": [[47, "graphnet.models.components.layers.DynTrans.forward"]], "forward() (graphnet.models.components.layers.edgeconvtito method)": [[47, "graphnet.models.components.layers.EdgeConvTito.forward"]], "graphnet.models.components.layers": [[47, "module-graphnet.models.components.layers"]], "message() (graphnet.models.components.layers.edgeconvtito method)": [[47, "graphnet.models.components.layers.EdgeConvTito.message"]], "reset_parameters() (graphnet.models.components.layers.edgeconvtito method)": [[47, "graphnet.models.components.layers.EdgeConvTito.reset_parameters"]], "graphnet.models.components.pool": [[48, "module-graphnet.models.components.pool"]], "group_by() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.group_by"]], "group_pulses_to_dom() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.group_pulses_to_dom"]], "group_pulses_to_pmt() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.group_pulses_to_pmt"]], "min_pool() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.min_pool"]], "min_pool_x() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.min_pool_x"]], "std_pool() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.std_pool"]], "std_pool_x() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.std_pool_x"]], "sum_pool() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.sum_pool"]], "sum_pool_and_distribute() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.sum_pool_and_distribute"]], "sum_pool_x() (in module graphnet.models.components.pool)": [[48, "graphnet.models.components.pool.sum_pool_x"]], "graphnet.models.detector": [[49, "module-graphnet.models.detector"]], "detector (class in graphnet.models.detector.detector)": [[50, "graphnet.models.detector.detector.Detector"]], "feature_map() (graphnet.models.detector.detector.detector method)": [[50, "graphnet.models.detector.detector.Detector.feature_map"]], "forward() (graphnet.models.detector.detector.detector method)": [[50, "graphnet.models.detector.detector.Detector.forward"]], "graphnet.models.detector.detector": [[50, "module-graphnet.models.detector.detector"]], "icecube86 (class in graphnet.models.detector.icecube)": [[51, "graphnet.models.detector.icecube.IceCube86"]], "icecubedeepcore (class in graphnet.models.detector.icecube)": [[51, "graphnet.models.detector.icecube.IceCubeDeepCore"]], "icecubekaggle (class in graphnet.models.detector.icecube)": [[51, "graphnet.models.detector.icecube.IceCubeKaggle"]], "icecubeupgrade (class in graphnet.models.detector.icecube)": [[51, "graphnet.models.detector.icecube.IceCubeUpgrade"]], "feature_map() (graphnet.models.detector.icecube.icecube86 method)": [[51, "graphnet.models.detector.icecube.IceCube86.feature_map"]], "feature_map() 
(graphnet.models.detector.icecube.icecubedeepcore method)": [[51, "graphnet.models.detector.icecube.IceCubeDeepCore.feature_map"]], "feature_map() (graphnet.models.detector.icecube.icecubekaggle method)": [[51, "graphnet.models.detector.icecube.IceCubeKaggle.feature_map"]], "feature_map() (graphnet.models.detector.icecube.icecubeupgrade method)": [[51, "graphnet.models.detector.icecube.IceCubeUpgrade.feature_map"]], "graphnet.models.detector.icecube": [[51, "module-graphnet.models.detector.icecube"]], "prometheus (class in graphnet.models.detector.prometheus)": [[52, "graphnet.models.detector.prometheus.Prometheus"]], "feature_map() (graphnet.models.detector.prometheus.prometheus method)": [[52, "graphnet.models.detector.prometheus.Prometheus.feature_map"]], "graphnet.models.detector.prometheus": [[52, "module-graphnet.models.detector.prometheus"]], "graphnet.models.gnn": [[53, "module-graphnet.models.gnn"]], "convnet (class in graphnet.models.gnn.convnet)": [[54, "graphnet.models.gnn.convnet.ConvNet"]], "forward() (graphnet.models.gnn.convnet.convnet method)": [[54, "graphnet.models.gnn.convnet.ConvNet.forward"]], "graphnet.models.gnn.convnet": [[54, "module-graphnet.models.gnn.convnet"]], "dynedge (class in graphnet.models.gnn.dynedge)": [[55, "graphnet.models.gnn.dynedge.DynEdge"]], "forward() (graphnet.models.gnn.dynedge.dynedge method)": [[55, "graphnet.models.gnn.dynedge.DynEdge.forward"]], "graphnet.models.gnn.dynedge": [[55, "module-graphnet.models.gnn.dynedge"]], "dynedgejinst (class in graphnet.models.gnn.dynedge_jinst)": [[56, "graphnet.models.gnn.dynedge_jinst.DynEdgeJINST"]], "forward() (graphnet.models.gnn.dynedge_jinst.dynedgejinst method)": [[56, "graphnet.models.gnn.dynedge_jinst.DynEdgeJINST.forward"]], "graphnet.models.gnn.dynedge_jinst": [[56, "module-graphnet.models.gnn.dynedge_jinst"]], "dynedgetito (class in graphnet.models.gnn.dynedge_kaggle_tito)": [[57, "graphnet.models.gnn.dynedge_kaggle_tito.DynEdgeTITO"]], "forward() (graphnet.models.gnn.dynedge_kaggle_tito.dynedgetito method)": [[57, "graphnet.models.gnn.dynedge_kaggle_tito.DynEdgeTITO.forward"]], "graphnet.models.gnn.dynedge_kaggle_tito": [[57, "module-graphnet.models.gnn.dynedge_kaggle_tito"]], "gnn (class in graphnet.models.gnn.gnn)": [[58, "graphnet.models.gnn.gnn.GNN"]], "forward() (graphnet.models.gnn.gnn.gnn method)": [[58, "graphnet.models.gnn.gnn.GNN.forward"]], "graphnet.models.gnn.gnn": [[58, "module-graphnet.models.gnn.gnn"]], "nb_inputs (graphnet.models.gnn.gnn.gnn property)": [[58, "graphnet.models.gnn.gnn.GNN.nb_inputs"]], "nb_outputs (graphnet.models.gnn.gnn.gnn property)": [[58, "graphnet.models.gnn.gnn.GNN.nb_outputs"]], "graphnet.models.graphs": [[59, "module-graphnet.models.graphs"]], "graphnet.models.graphs.edges": [[60, "module-graphnet.models.graphs.edges"]], "edgedefinition (class in graphnet.models.graphs.edges.edges)": [[61, "graphnet.models.graphs.edges.edges.EdgeDefinition"]], "euclideanedges (class in graphnet.models.graphs.edges.edges)": [[61, "graphnet.models.graphs.edges.edges.EuclideanEdges"]], "knnedges (class in graphnet.models.graphs.edges.edges)": [[61, "graphnet.models.graphs.edges.edges.KNNEdges"]], "radialedges (class in graphnet.models.graphs.edges.edges)": [[61, "graphnet.models.graphs.edges.edges.RadialEdges"]], "forward() (graphnet.models.graphs.edges.edges.edgedefinition method)": [[61, "graphnet.models.graphs.edges.edges.EdgeDefinition.forward"]], "graphnet.models.graphs.edges.edges": [[61, "module-graphnet.models.graphs.edges.edges"]], "graphdefinition (class in 
graphnet.models.graphs.graph_definition)": [[62, "graphnet.models.graphs.graph_definition.GraphDefinition"]], "forward() (graphnet.models.graphs.graph_definition.graphdefinition method)": [[62, "graphnet.models.graphs.graph_definition.GraphDefinition.forward"]], "graphnet.models.graphs.graph_definition": [[62, "module-graphnet.models.graphs.graph_definition"]], "knngraph (class in graphnet.models.graphs.graphs)": [[63, "graphnet.models.graphs.graphs.KNNGraph"]], "graphnet.models.graphs.graphs": [[63, "module-graphnet.models.graphs.graphs"]], "graphnet.models.graphs.nodes": [[64, "module-graphnet.models.graphs.nodes"]], "nodedefinition (class in graphnet.models.graphs.nodes.nodes)": [[65, "graphnet.models.graphs.nodes.nodes.NodeDefinition"]], "nodesaspulses (class in graphnet.models.graphs.nodes.nodes)": [[65, "graphnet.models.graphs.nodes.nodes.NodesAsPulses"]], "forward() (graphnet.models.graphs.nodes.nodes.nodedefinition method)": [[65, "graphnet.models.graphs.nodes.nodes.NodeDefinition.forward"]], "graphnet.models.graphs.nodes.nodes": [[65, "module-graphnet.models.graphs.nodes.nodes"]], "nb_outputs (graphnet.models.graphs.nodes.nodes.nodedefinition property)": [[65, "graphnet.models.graphs.nodes.nodes.NodeDefinition.nb_outputs"]], "set_number_of_inputs() (graphnet.models.graphs.nodes.nodes.nodedefinition method)": [[65, "graphnet.models.graphs.nodes.nodes.NodeDefinition.set_number_of_inputs"]], "model (class in graphnet.models.model)": [[66, "graphnet.models.model.Model"]], "fit() (graphnet.models.model.model method)": [[66, "graphnet.models.model.Model.fit"]], "forward() (graphnet.models.model.model method)": [[66, "graphnet.models.model.Model.forward"]], "from_config() (graphnet.models.model.model class method)": [[66, "graphnet.models.model.Model.from_config"]], "graphnet.models.model": [[66, "module-graphnet.models.model"]], "load() (graphnet.models.model.model class method)": [[66, "graphnet.models.model.Model.load"]], "load_state_dict() (graphnet.models.model.model method)": [[66, "graphnet.models.model.Model.load_state_dict"]], "predict() (graphnet.models.model.model method)": [[66, "graphnet.models.model.Model.predict"]], "predict_as_dataframe() (graphnet.models.model.model method)": [[66, "graphnet.models.model.Model.predict_as_dataframe"]], "save() (graphnet.models.model.model method)": [[66, "graphnet.models.model.Model.save"]], "save_state_dict() (graphnet.models.model.model method)": [[66, "graphnet.models.model.Model.save_state_dict"]], "standardmodel (class in graphnet.models.standard_model)": [[67, "graphnet.models.standard_model.StandardModel"]], "compute_loss() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.compute_loss"]], "configure_optimizers() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.configure_optimizers"]], "forward() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.forward"]], "graphnet.models.standard_model": [[67, "module-graphnet.models.standard_model"]], "inference() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.inference"]], "predict() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.predict"]], "predict_as_dataframe() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.predict_as_dataframe"]], 
"prediction_labels (graphnet.models.standard_model.standardmodel property)": [[67, "graphnet.models.standard_model.StandardModel.prediction_labels"]], "shared_step() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.shared_step"]], "target_labels (graphnet.models.standard_model.standardmodel property)": [[67, "graphnet.models.standard_model.StandardModel.target_labels"]], "train() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.train"]], "training_step() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.training_step"]], "validation_step() (graphnet.models.standard_model.standardmodel method)": [[67, "graphnet.models.standard_model.StandardModel.validation_step"]], "graphnet.models.task": [[68, "module-graphnet.models.task"]], "binaryclassificationtask (class in graphnet.models.task.classification)": [[69, "graphnet.models.task.classification.BinaryClassificationTask"]], "binaryclassificationtasklogits (class in graphnet.models.task.classification)": [[69, "graphnet.models.task.classification.BinaryClassificationTaskLogits"]], "multiclassclassificationtask (class in graphnet.models.task.classification)": [[69, "graphnet.models.task.classification.MulticlassClassificationTask"]], "default_prediction_labels (graphnet.models.task.classification.binaryclassificationtask attribute)": [[69, "graphnet.models.task.classification.BinaryClassificationTask.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.classification.binaryclassificationtasklogits attribute)": [[69, "graphnet.models.task.classification.BinaryClassificationTaskLogits.default_prediction_labels"]], "default_target_labels (graphnet.models.task.classification.binaryclassificationtask attribute)": [[69, "graphnet.models.task.classification.BinaryClassificationTask.default_target_labels"]], "default_target_labels (graphnet.models.task.classification.binaryclassificationtasklogits attribute)": [[69, "graphnet.models.task.classification.BinaryClassificationTaskLogits.default_target_labels"]], "graphnet.models.task.classification": [[69, "module-graphnet.models.task.classification"]], "nb_inputs (graphnet.models.task.classification.binaryclassificationtask attribute)": [[69, "graphnet.models.task.classification.BinaryClassificationTask.nb_inputs"]], "nb_inputs (graphnet.models.task.classification.binaryclassificationtasklogits attribute)": [[69, "graphnet.models.task.classification.BinaryClassificationTaskLogits.nb_inputs"]], "azimuthreconstruction (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.AzimuthReconstruction"]], "azimuthreconstructionwithkappa (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa"]], "directionreconstructionwithkappa (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa"]], "energyreconstruction (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.EnergyReconstruction"]], "energyreconstructionwithpower (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.EnergyReconstructionWithPower"]], "energyreconstructionwithuncertainty (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty"]], 
"inelasticityreconstruction (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.InelasticityReconstruction"]], "positionreconstruction (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.PositionReconstruction"]], "timereconstruction (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.TimeReconstruction"]], "vertexreconstruction (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.VertexReconstruction"]], "zenithreconstruction (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.ZenithReconstruction"]], "zenithreconstructionwithkappa (class in graphnet.models.task.reconstruction)": [[70, "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa"]], "default_prediction_labels (graphnet.models.task.reconstruction.azimuthreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.AzimuthReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.azimuthreconstructionwithkappa attribute)": [[70, "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.directionreconstructionwithkappa attribute)": [[70, "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.energyreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.EnergyReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.energyreconstructionwithpower attribute)": [[70, "graphnet.models.task.reconstruction.EnergyReconstructionWithPower.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.energyreconstructionwithuncertainty attribute)": [[70, "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.inelasticityreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.InelasticityReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.positionreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.PositionReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.timereconstruction attribute)": [[70, "graphnet.models.task.reconstruction.TimeReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.vertexreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.VertexReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.zenithreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.ZenithReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.zenithreconstructionwithkappa attribute)": [[70, "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa.default_prediction_labels"]], "default_target_labels (graphnet.models.task.reconstruction.azimuthreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.AzimuthReconstruction.default_target_labels"]], "default_target_labels 
(graphnet.models.task.reconstruction.azimuthreconstructionwithkappa attribute)": [[70, "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.directionreconstructionwithkappa attribute)": [[70, "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.energyreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.EnergyReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.energyreconstructionwithpower attribute)": [[70, "graphnet.models.task.reconstruction.EnergyReconstructionWithPower.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.energyreconstructionwithuncertainty attribute)": [[70, "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.inelasticityreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.InelasticityReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.positionreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.PositionReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.timereconstruction attribute)": [[70, "graphnet.models.task.reconstruction.TimeReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.vertexreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.VertexReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.zenithreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.ZenithReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.zenithreconstructionwithkappa attribute)": [[70, "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa.default_target_labels"]], "graphnet.models.task.reconstruction": [[70, "module-graphnet.models.task.reconstruction"]], "nb_inputs (graphnet.models.task.reconstruction.azimuthreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.AzimuthReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.azimuthreconstructionwithkappa attribute)": [[70, "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.directionreconstructionwithkappa attribute)": [[70, "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.energyreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.EnergyReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.energyreconstructionwithpower attribute)": [[70, "graphnet.models.task.reconstruction.EnergyReconstructionWithPower.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.energyreconstructionwithuncertainty attribute)": [[70, "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.inelasticityreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.InelasticityReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.positionreconstruction attribute)": [[70, 
"graphnet.models.task.reconstruction.PositionReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.timereconstruction attribute)": [[70, "graphnet.models.task.reconstruction.TimeReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.vertexreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.VertexReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.zenithreconstruction attribute)": [[70, "graphnet.models.task.reconstruction.ZenithReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.zenithreconstructionwithkappa attribute)": [[70, "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa.nb_inputs"]], "identitytask (class in graphnet.models.task.task)": [[71, "graphnet.models.task.task.IdentityTask"]], "task (class in graphnet.models.task.task)": [[71, "graphnet.models.task.task.Task"]], "compute_loss() (graphnet.models.task.task.task method)": [[71, "graphnet.models.task.task.Task.compute_loss"]], "default_prediction_labels (graphnet.models.task.task.identitytask property)": [[71, "graphnet.models.task.task.IdentityTask.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.task.task property)": [[71, "graphnet.models.task.task.Task.default_prediction_labels"]], "default_target_labels (graphnet.models.task.task.identitytask property)": [[71, "graphnet.models.task.task.IdentityTask.default_target_labels"]], "default_target_labels (graphnet.models.task.task.task property)": [[71, "graphnet.models.task.task.Task.default_target_labels"]], "forward() (graphnet.models.task.task.task method)": [[71, "graphnet.models.task.task.Task.forward"]], "graphnet.models.task.task": [[71, "module-graphnet.models.task.task"]], "inference() (graphnet.models.task.task.task method)": [[71, "graphnet.models.task.task.Task.inference"]], "nb_inputs (graphnet.models.task.task.identitytask property)": [[71, "graphnet.models.task.task.IdentityTask.nb_inputs"]], "nb_inputs (graphnet.models.task.task.task property)": [[71, "graphnet.models.task.task.Task.nb_inputs"]], "train_eval() (graphnet.models.task.task.task method)": [[71, "graphnet.models.task.task.Task.train_eval"]], "calculate_distance_matrix() (in module graphnet.models.utils)": [[72, "graphnet.models.utils.calculate_distance_matrix"]], "calculate_xyzt_homophily() (in module graphnet.models.utils)": [[72, "graphnet.models.utils.calculate_xyzt_homophily"]], "graphnet.models.utils": [[72, "module-graphnet.models.utils"]], "knn_graph_batch() (in module graphnet.models.utils)": [[72, "graphnet.models.utils.knn_graph_batch"]], "graphnet.pisa": [[73, "module-graphnet.pisa"]], "contourfitter (class in graphnet.pisa.fitting)": [[74, "graphnet.pisa.fitting.ContourFitter"]], "weightfitter (class in graphnet.pisa.fitting)": [[74, "graphnet.pisa.fitting.WeightFitter"]], "config_updater() (in module graphnet.pisa.fitting)": [[74, "graphnet.pisa.fitting.config_updater"]], "fit_1d_contour() (graphnet.pisa.fitting.contourfitter method)": [[74, "graphnet.pisa.fitting.ContourFitter.fit_1d_contour"]], "fit_2d_contour() (graphnet.pisa.fitting.contourfitter method)": [[74, "graphnet.pisa.fitting.ContourFitter.fit_2d_contour"]], "fit_weights() (graphnet.pisa.fitting.weightfitter method)": [[74, "graphnet.pisa.fitting.WeightFitter.fit_weights"]], "graphnet.pisa.fitting": [[74, "module-graphnet.pisa.fitting"]], "graphnet.pisa.plotting": [[75, "module-graphnet.pisa.plotting"]], "plot_1d_contour() (in module graphnet.pisa.plotting)": [[75, 
"graphnet.pisa.plotting.plot_1D_contour"]], "plot_2d_contour() (in module graphnet.pisa.plotting)": [[75, "graphnet.pisa.plotting.plot_2D_contour"]], "read_entry() (in module graphnet.pisa.plotting)": [[75, "graphnet.pisa.plotting.read_entry"]], "graphnet.training": [[76, "module-graphnet.training"]], "piecewiselinearlr (class in graphnet.training.callbacks)": [[77, "graphnet.training.callbacks.PiecewiseLinearLR"]], "progressbar (class in graphnet.training.callbacks)": [[77, "graphnet.training.callbacks.ProgressBar"]], "get_lr() (graphnet.training.callbacks.piecewiselinearlr method)": [[77, "graphnet.training.callbacks.PiecewiseLinearLR.get_lr"]], "get_metrics() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.get_metrics"]], "graphnet.training.callbacks": [[77, "module-graphnet.training.callbacks"]], "init_predict_tqdm() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.init_predict_tqdm"]], "init_test_tqdm() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.init_test_tqdm"]], "init_train_tqdm() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.init_train_tqdm"]], "init_validation_tqdm() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.init_validation_tqdm"]], "on_train_epoch_end() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.on_train_epoch_end"]], "on_train_epoch_start() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.on_train_epoch_start"]], "direction (class in graphnet.training.labels)": [[78, "graphnet.training.labels.Direction"]], "label (class in graphnet.training.labels)": [[78, "graphnet.training.labels.Label"]], "graphnet.training.labels": [[78, "module-graphnet.training.labels"]], "key (graphnet.training.labels.label property)": [[78, "graphnet.training.labels.Label.key"]], "binarycrossentropyloss (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.BinaryCrossEntropyLoss"]], "crossentropyloss (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.CrossEntropyLoss"]], "euclideandistanceloss (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.EuclideanDistanceLoss"]], "logcmk (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.LogCMK"]], "logcoshloss (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.LogCoshLoss"]], "lossfunction (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.LossFunction"]], "mseloss (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.MSELoss"]], "rmseloss (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.RMSELoss"]], "vonmisesfisher2dloss (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.VonMisesFisher2DLoss"]], "vonmisesfisher3dloss (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.VonMisesFisher3DLoss"]], "vonmisesfisherloss (class in graphnet.training.loss_functions)": [[79, "graphnet.training.loss_functions.VonMisesFisherLoss"]], "backward() (graphnet.training.loss_functions.logcmk static method)": [[79, "graphnet.training.loss_functions.LogCMK.backward"]], "forward() 
(graphnet.training.loss_functions.logcmk static method)": [[79, "graphnet.training.loss_functions.LogCMK.forward"]], "forward() (graphnet.training.loss_functions.lossfunction method)": [[79, "graphnet.training.loss_functions.LossFunction.forward"]], "graphnet.training.loss_functions": [[79, "module-graphnet.training.loss_functions"]], "log_cmk() (graphnet.training.loss_functions.vonmisesfisherloss class method)": [[79, "graphnet.training.loss_functions.VonMisesFisherLoss.log_cmk"]], "log_cmk_approx() (graphnet.training.loss_functions.vonmisesfisherloss class method)": [[79, "graphnet.training.loss_functions.VonMisesFisherLoss.log_cmk_approx"]], "log_cmk_exact() (graphnet.training.loss_functions.vonmisesfisherloss class method)": [[79, "graphnet.training.loss_functions.VonMisesFisherLoss.log_cmk_exact"]], "collate_fn() (in module graphnet.training.utils)": [[80, "graphnet.training.utils.collate_fn"]], "get_predictions() (in module graphnet.training.utils)": [[80, "graphnet.training.utils.get_predictions"]], "graphnet.training.utils": [[80, "module-graphnet.training.utils"]], "make_dataloader() (in module graphnet.training.utils)": [[80, "graphnet.training.utils.make_dataloader"]], "make_train_validation_dataloader() (in module graphnet.training.utils)": [[80, "graphnet.training.utils.make_train_validation_dataloader"]], "save_results() (in module graphnet.training.utils)": [[80, "graphnet.training.utils.save_results"]], "bjoernlow (class in graphnet.training.weight_fitting)": [[81, "graphnet.training.weight_fitting.BjoernLow"]], "uniform (class in graphnet.training.weight_fitting)": [[81, "graphnet.training.weight_fitting.Uniform"]], "weightfitter (class in graphnet.training.weight_fitting)": [[81, "graphnet.training.weight_fitting.WeightFitter"]], "fit() (graphnet.training.weight_fitting.weightfitter method)": [[81, "graphnet.training.weight_fitting.WeightFitter.fit"]], "graphnet.training.weight_fitting": [[81, "module-graphnet.training.weight_fitting"]], "graphnet.utilities": [[82, "module-graphnet.utilities"]], "argumentparser (class in graphnet.utilities.argparse)": [[83, "graphnet.utilities.argparse.ArgumentParser"]], "options (class in graphnet.utilities.argparse)": [[83, "graphnet.utilities.argparse.Options"]], "contains() (graphnet.utilities.argparse.options method)": [[83, "graphnet.utilities.argparse.Options.contains"]], "graphnet.utilities.argparse": [[83, "module-graphnet.utilities.argparse"]], "pop_default() (graphnet.utilities.argparse.options method)": [[83, "graphnet.utilities.argparse.Options.pop_default"]], "standard_arguments (graphnet.utilities.argparse.argumentparser attribute)": [[83, "graphnet.utilities.argparse.ArgumentParser.standard_arguments"]], "with_standard_arguments() (graphnet.utilities.argparse.argumentparser method)": [[83, "graphnet.utilities.argparse.ArgumentParser.with_standard_arguments"]], "graphnet.utilities.config": [[84, "module-graphnet.utilities.config"]], "baseconfig (class in graphnet.utilities.config.base_config)": [[85, "graphnet.utilities.config.base_config.BaseConfig"]], "as_dict() (graphnet.utilities.config.base_config.baseconfig method)": [[85, "graphnet.utilities.config.base_config.BaseConfig.as_dict"]], "dump() (graphnet.utilities.config.base_config.baseconfig method)": [[85, "graphnet.utilities.config.base_config.BaseConfig.dump"]], "get_all_argument_values() (in module graphnet.utilities.config.base_config)": [[85, "graphnet.utilities.config.base_config.get_all_argument_values"]], "graphnet.utilities.config.base_config": [[85, 
"module-graphnet.utilities.config.base_config"]], "load() (graphnet.utilities.config.base_config.baseconfig class method)": [[85, "graphnet.utilities.config.base_config.BaseConfig.load"]], "model_config (graphnet.utilities.config.base_config.baseconfig attribute)": [[85, "graphnet.utilities.config.base_config.BaseConfig.model_config"]], "model_fields (graphnet.utilities.config.base_config.baseconfig attribute)": [[85, "graphnet.utilities.config.base_config.BaseConfig.model_fields"]], "configurable (class in graphnet.utilities.config.configurable)": [[86, "graphnet.utilities.config.configurable.Configurable"]], "config (graphnet.utilities.config.configurable.configurable property)": [[86, "graphnet.utilities.config.configurable.Configurable.config"]], "from_config() (graphnet.utilities.config.configurable.configurable class method)": [[86, "graphnet.utilities.config.configurable.Configurable.from_config"]], "graphnet.utilities.config.configurable": [[86, "module-graphnet.utilities.config.configurable"]], "save_config() (graphnet.utilities.config.configurable.configurable method)": [[86, "graphnet.utilities.config.configurable.Configurable.save_config"]], "datasetconfig (class in graphnet.utilities.config.dataset_config)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig"]], "datasetconfigsaverabcmeta (class in graphnet.utilities.config.dataset_config)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfigSaverABCMeta"]], "datasetconfigsavermeta (class in graphnet.utilities.config.dataset_config)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfigSaverMeta"]], "as_dict() (graphnet.utilities.config.dataset_config.datasetconfig method)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.as_dict"]], "features (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.features"]], "graph_definition (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.graph_definition"]], "graphnet.utilities.config.dataset_config": [[87, "module-graphnet.utilities.config.dataset_config"]], "index_column (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.index_column"]], "loss_weight_column (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_column"]], "loss_weight_default_value (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_default_value"]], "loss_weight_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_table"]], "model_config (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.model_config"]], "model_fields (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.model_fields"]], "node_truth (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.node_truth"]], "node_truth_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.node_truth_table"]], "path 
(graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.path"]], "pulsemaps (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.pulsemaps"]], "save_dataset_config() (in module graphnet.utilities.config.dataset_config)": [[87, "graphnet.utilities.config.dataset_config.save_dataset_config"]], "seed (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.seed"]], "selection (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.selection"]], "string_selection (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.string_selection"]], "truth (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.truth"]], "truth_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.truth_table"]], "modelconfig (class in graphnet.utilities.config.model_config)": [[88, "graphnet.utilities.config.model_config.ModelConfig"]], "modelconfigsaverabc (class in graphnet.utilities.config.model_config)": [[88, "graphnet.utilities.config.model_config.ModelConfigSaverABC"]], "modelconfigsavermeta (class in graphnet.utilities.config.model_config)": [[88, "graphnet.utilities.config.model_config.ModelConfigSaverMeta"]], "arguments (graphnet.utilities.config.model_config.modelconfig attribute)": [[88, "graphnet.utilities.config.model_config.ModelConfig.arguments"]], "as_dict() (graphnet.utilities.config.model_config.modelconfig method)": [[88, "graphnet.utilities.config.model_config.ModelConfig.as_dict"]], "class_name (graphnet.utilities.config.model_config.modelconfig attribute)": [[88, "graphnet.utilities.config.model_config.ModelConfig.class_name"]], "graphnet.utilities.config.model_config": [[88, "module-graphnet.utilities.config.model_config"]], "model_config (graphnet.utilities.config.model_config.modelconfig attribute)": [[88, "graphnet.utilities.config.model_config.ModelConfig.model_config"]], "model_fields (graphnet.utilities.config.model_config.modelconfig attribute)": [[88, "graphnet.utilities.config.model_config.ModelConfig.model_fields"]], "save_model_config() (in module graphnet.utilities.config.model_config)": [[88, "graphnet.utilities.config.model_config.save_model_config"]], "get_all_grapnet_classes() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.get_all_grapnet_classes"]], "get_graphnet_classes() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.get_graphnet_classes"]], "graphnet.utilities.config.parsing": [[89, "module-graphnet.utilities.config.parsing"]], "is_graphnet_class() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.is_graphnet_class"]], "is_graphnet_module() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.is_graphnet_module"]], "list_all_submodules() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.list_all_submodules"]], "traverse_and_apply() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.traverse_and_apply"]], "trainingconfig 
(class in graphnet.utilities.config.training_config)": [[90, "graphnet.utilities.config.training_config.TrainingConfig"]], "dataloader (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.dataloader"]], "early_stopping_patience (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.early_stopping_patience"]], "fit (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.fit"]], "graphnet.utilities.config.training_config": [[90, "module-graphnet.utilities.config.training_config"]], "model_config (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.model_config"]], "model_fields (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.model_fields"]], "target (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.target"]], "graphnet.utilities.decorators": [[91, "module-graphnet.utilities.decorators"]], "find_i3_files() (in module graphnet.utilities.filesys)": [[92, "graphnet.utilities.filesys.find_i3_files"]], "graphnet.utilities.filesys": [[92, "module-graphnet.utilities.filesys"]], "has_extension() (in module graphnet.utilities.filesys)": [[92, "graphnet.utilities.filesys.has_extension"]], "is_gcd_file() (in module graphnet.utilities.filesys)": [[92, "graphnet.utilities.filesys.is_gcd_file"]], "is_i3_file() (in module graphnet.utilities.filesys)": [[92, "graphnet.utilities.filesys.is_i3_file"]], "graphnet.utilities.imports": [[93, "module-graphnet.utilities.imports"]], "has_icecube_package() (in module graphnet.utilities.imports)": [[93, "graphnet.utilities.imports.has_icecube_package"]], "has_pisa_package() (in module graphnet.utilities.imports)": [[93, "graphnet.utilities.imports.has_pisa_package"]], "has_torch_package() (in module graphnet.utilities.imports)": [[93, "graphnet.utilities.imports.has_torch_package"]], "requires_icecube() (in module graphnet.utilities.imports)": [[93, "graphnet.utilities.imports.requires_icecube"]], "logger (class in graphnet.utilities.logging)": [[94, "graphnet.utilities.logging.Logger"]], "repeatfilter (class in graphnet.utilities.logging)": [[94, "graphnet.utilities.logging.RepeatFilter"]], "critical() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.critical"]], "debug() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.debug"]], "error() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.error"]], "file_handlers (graphnet.utilities.logging.logger property)": [[94, "graphnet.utilities.logging.Logger.file_handlers"]], "filter() (graphnet.utilities.logging.repeatfilter method)": [[94, "graphnet.utilities.logging.RepeatFilter.filter"]], "graphnet.utilities.logging": [[94, "module-graphnet.utilities.logging"]], "handlers (graphnet.utilities.logging.logger property)": [[94, "graphnet.utilities.logging.Logger.handlers"]], "info() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.info"]], "nb_repeats_allowed (graphnet.utilities.logging.repeatfilter attribute)": [[94, "graphnet.utilities.logging.RepeatFilter.nb_repeats_allowed"]], "setlevel() 
(graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.setLevel"]], "stream_handlers (graphnet.utilities.logging.logger property)": [[94, "graphnet.utilities.logging.Logger.stream_handlers"]], "warning() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.warning"]], "warning_once() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.warning_once"]], "eps_like() (in module graphnet.utilities.maths)": [[95, "graphnet.utilities.maths.eps_like"]], "graphnet.utilities.maths": [[95, "module-graphnet.utilities.maths"]]}}) \ No newline at end of file +Search.setIndex({"docnames": ["about", "api/graphnet", "api/graphnet.constants", "api/graphnet.data", "api/graphnet.data.constants", "api/graphnet.data.dataconverter", "api/graphnet.data.dataloader", "api/graphnet.data.dataset", "api/graphnet.data.dataset.dataset", "api/graphnet.data.dataset.parquet", "api/graphnet.data.dataset.parquet.parquet_dataset", "api/graphnet.data.dataset.sqlite", "api/graphnet.data.dataset.sqlite.sqlite_dataset", "api/graphnet.data.extractors", "api/graphnet.data.extractors.i3extractor", "api/graphnet.data.extractors.i3featureextractor", "api/graphnet.data.extractors.i3genericextractor", "api/graphnet.data.extractors.i3hybridrecoextractor", "api/graphnet.data.extractors.i3ntmuonlabelsextractor", "api/graphnet.data.extractors.i3particleextractor", "api/graphnet.data.extractors.i3pisaextractor", "api/graphnet.data.extractors.i3quesoextractor", "api/graphnet.data.extractors.i3retroextractor", "api/graphnet.data.extractors.i3splinempeextractor", "api/graphnet.data.extractors.i3truthextractor", "api/graphnet.data.extractors.i3tumextractor", "api/graphnet.data.extractors.utilities", "api/graphnet.data.extractors.utilities.collections", "api/graphnet.data.extractors.utilities.frames", "api/graphnet.data.extractors.utilities.types", "api/graphnet.data.parquet", "api/graphnet.data.parquet.parquet_dataconverter", "api/graphnet.data.pipeline", "api/graphnet.data.sqlite", "api/graphnet.data.sqlite.sqlite_dataconverter", "api/graphnet.data.sqlite.sqlite_utilities", "api/graphnet.data.utilities", "api/graphnet.data.utilities.parquet_to_sqlite", "api/graphnet.data.utilities.random", "api/graphnet.data.utilities.string_selection_resolver", "api/graphnet.deployment", "api/graphnet.deployment.i3modules", "api/graphnet.deployment.i3modules.deployer", "api/graphnet.deployment.i3modules.graphnet_module", "api/graphnet.models", "api/graphnet.models.coarsening", "api/graphnet.models.components", "api/graphnet.models.components.layers", "api/graphnet.models.components.pool", "api/graphnet.models.detector", "api/graphnet.models.detector.detector", "api/graphnet.models.detector.icecube", "api/graphnet.models.detector.prometheus", "api/graphnet.models.gnn", "api/graphnet.models.gnn.convnet", "api/graphnet.models.gnn.dynedge", "api/graphnet.models.gnn.dynedge_jinst", "api/graphnet.models.gnn.dynedge_kaggle_tito", "api/graphnet.models.gnn.gnn", "api/graphnet.models.graphs", "api/graphnet.models.graphs.edges", "api/graphnet.models.graphs.edges.edges", "api/graphnet.models.graphs.graph_definition", "api/graphnet.models.graphs.graphs", "api/graphnet.models.graphs.nodes", "api/graphnet.models.graphs.nodes.nodes", "api/graphnet.models.model", "api/graphnet.models.standard_model", "api/graphnet.models.task", "api/graphnet.models.task.classification", "api/graphnet.models.task.reconstruction", "api/graphnet.models.task.task", "api/graphnet.models.utils", 
"api/graphnet.pisa", "api/graphnet.pisa.fitting", "api/graphnet.pisa.plotting", "api/graphnet.training", "api/graphnet.training.callbacks", "api/graphnet.training.labels", "api/graphnet.training.loss_functions", "api/graphnet.training.utils", "api/graphnet.training.weight_fitting", "api/graphnet.utilities", "api/graphnet.utilities.argparse", "api/graphnet.utilities.config", "api/graphnet.utilities.config.base_config", "api/graphnet.utilities.config.configurable", "api/graphnet.utilities.config.dataset_config", "api/graphnet.utilities.config.model_config", "api/graphnet.utilities.config.parsing", "api/graphnet.utilities.config.training_config", "api/graphnet.utilities.decorators", "api/graphnet.utilities.filesys", "api/graphnet.utilities.imports", "api/graphnet.utilities.logging", "api/graphnet.utilities.maths", "api/modules", "contribute", "index", "install"], "filenames": ["about.md", "api/graphnet.rst", "api/graphnet.constants.rst", "api/graphnet.data.rst", "api/graphnet.data.constants.rst", "api/graphnet.data.dataconverter.rst", "api/graphnet.data.dataloader.rst", "api/graphnet.data.dataset.rst", "api/graphnet.data.dataset.dataset.rst", "api/graphnet.data.dataset.parquet.rst", "api/graphnet.data.dataset.parquet.parquet_dataset.rst", "api/graphnet.data.dataset.sqlite.rst", "api/graphnet.data.dataset.sqlite.sqlite_dataset.rst", "api/graphnet.data.extractors.rst", "api/graphnet.data.extractors.i3extractor.rst", "api/graphnet.data.extractors.i3featureextractor.rst", "api/graphnet.data.extractors.i3genericextractor.rst", "api/graphnet.data.extractors.i3hybridrecoextractor.rst", "api/graphnet.data.extractors.i3ntmuonlabelsextractor.rst", "api/graphnet.data.extractors.i3particleextractor.rst", "api/graphnet.data.extractors.i3pisaextractor.rst", "api/graphnet.data.extractors.i3quesoextractor.rst", "api/graphnet.data.extractors.i3retroextractor.rst", "api/graphnet.data.extractors.i3splinempeextractor.rst", "api/graphnet.data.extractors.i3truthextractor.rst", "api/graphnet.data.extractors.i3tumextractor.rst", "api/graphnet.data.extractors.utilities.rst", "api/graphnet.data.extractors.utilities.collections.rst", "api/graphnet.data.extractors.utilities.frames.rst", "api/graphnet.data.extractors.utilities.types.rst", "api/graphnet.data.parquet.rst", "api/graphnet.data.parquet.parquet_dataconverter.rst", "api/graphnet.data.pipeline.rst", "api/graphnet.data.sqlite.rst", "api/graphnet.data.sqlite.sqlite_dataconverter.rst", "api/graphnet.data.sqlite.sqlite_utilities.rst", "api/graphnet.data.utilities.rst", "api/graphnet.data.utilities.parquet_to_sqlite.rst", "api/graphnet.data.utilities.random.rst", "api/graphnet.data.utilities.string_selection_resolver.rst", "api/graphnet.deployment.rst", "api/graphnet.deployment.i3modules.rst", "api/graphnet.deployment.i3modules.deployer.rst", "api/graphnet.deployment.i3modules.graphnet_module.rst", "api/graphnet.models.rst", "api/graphnet.models.coarsening.rst", "api/graphnet.models.components.rst", "api/graphnet.models.components.layers.rst", "api/graphnet.models.components.pool.rst", "api/graphnet.models.detector.rst", "api/graphnet.models.detector.detector.rst", "api/graphnet.models.detector.icecube.rst", "api/graphnet.models.detector.prometheus.rst", "api/graphnet.models.gnn.rst", "api/graphnet.models.gnn.convnet.rst", "api/graphnet.models.gnn.dynedge.rst", "api/graphnet.models.gnn.dynedge_jinst.rst", "api/graphnet.models.gnn.dynedge_kaggle_tito.rst", "api/graphnet.models.gnn.gnn.rst", "api/graphnet.models.graphs.rst", "api/graphnet.models.graphs.edges.rst", 
"api/graphnet.models.graphs.edges.edges.rst", "api/graphnet.models.graphs.graph_definition.rst", "api/graphnet.models.graphs.graphs.rst", "api/graphnet.models.graphs.nodes.rst", "api/graphnet.models.graphs.nodes.nodes.rst", "api/graphnet.models.model.rst", "api/graphnet.models.standard_model.rst", "api/graphnet.models.task.rst", "api/graphnet.models.task.classification.rst", "api/graphnet.models.task.reconstruction.rst", "api/graphnet.models.task.task.rst", "api/graphnet.models.utils.rst", "api/graphnet.pisa.rst", "api/graphnet.pisa.fitting.rst", "api/graphnet.pisa.plotting.rst", "api/graphnet.training.rst", "api/graphnet.training.callbacks.rst", "api/graphnet.training.labels.rst", "api/graphnet.training.loss_functions.rst", "api/graphnet.training.utils.rst", "api/graphnet.training.weight_fitting.rst", "api/graphnet.utilities.rst", "api/graphnet.utilities.argparse.rst", "api/graphnet.utilities.config.rst", "api/graphnet.utilities.config.base_config.rst", "api/graphnet.utilities.config.configurable.rst", "api/graphnet.utilities.config.dataset_config.rst", "api/graphnet.utilities.config.model_config.rst", "api/graphnet.utilities.config.parsing.rst", "api/graphnet.utilities.config.training_config.rst", "api/graphnet.utilities.decorators.rst", "api/graphnet.utilities.filesys.rst", "api/graphnet.utilities.imports.rst", "api/graphnet.utilities.logging.rst", "api/graphnet.utilities.maths.rst", "api/modules.rst", "contribute.md", "index.rst", "install.md"], "titles": ["About", "API", "constants", "data", "constants", "dataconverter", "dataloader", "dataset", "dataset", "parquet", "parquet_dataset", "sqlite", "sqlite_dataset", "extractors", "i3extractor", "i3featureextractor", "i3genericextractor", "i3hybridrecoextractor", "i3ntmuonlabelsextractor", "i3particleextractor", "i3pisaextractor", "i3quesoextractor", "i3retroextractor", "i3splinempeextractor", "i3truthextractor", "i3tumextractor", "utilities", "collections", "frames", "types", "parquet", "parquet_dataconverter", "pipeline", "sqlite", "sqlite_dataconverter", "sqlite_utilities", "utilities", "parquet_to_sqlite", "random", "string_selection_resolver", "deployment", "i3modules", "deployer", "graphnet_module", "models", "coarsening", "components", "layers", "pool", "detector", "detector", "icecube", "prometheus", "gnn", "convnet", "dynedge", "dynedge_jinst", "dynedge_kaggle_tito", "gnn", "graphs", "edges", "edges", "graph_definition", "graphs", "nodes", "nodes", "model", "standard_model", "task", "classification", "reconstruction", "task", "utils", "pisa", "fitting", "plotting", "training", "callbacks", "labels", "loss_functions", "utils", "weight_fitting", "utilities", "argparse", "config", "base_config", "configurable", "dataset_config", "model_config", "parsing", "training_config", "decorators", "filesys", "imports", "logging", "maths", "src", "Contribute", "About", "Install"], "terms": {"graphnet": [0, 1, 2, 3, 4, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34, 35, 36, 37, 38, 39, 40, 74, 75, 76, 77, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 97, 98, 99], "i": [0, 1, 14, 16, 27, 28, 29, 34, 35, 38, 39, 75, 77, 81, 83, 88, 89, 92, 93, 94, 97, 98, 99], "an": [0, 5, 29, 31, 34, 39, 92, 94, 97, 98, 99], "open": [0, 97, 98], "sourc": [0, 4, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 34, 35, 37, 38, 39, 74, 75, 77, 81, 83, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 97, 98], "python": [0, 1, 5, 13, 14, 16, 27, 29, 97, 98, 99], "framework": [0, 98], "aim": [0, 1, 97, 98], "provid": [0, 
1, 97, 98, 99], "high": [0, 98], "qualiti": [0, 98], "user": [0, 77, 98, 99], "friendli": [0, 98], "end": [0, 1, 5, 31, 34, 98], "function": [0, 5, 29, 35, 38, 74, 75, 82, 87, 88, 89, 92, 93, 95, 98], "perform": [0, 98], "reconstruct": [0, 1, 15, 17, 18, 22, 23, 25, 40, 44, 68, 98], "task": [0, 1, 44, 97, 98], "neutrino": [0, 1, 74, 98], "telescop": [0, 1, 98], "us": [0, 1, 2, 4, 5, 14, 19, 24, 26, 27, 31, 34, 35, 36, 37, 39, 40, 74, 77, 81, 82, 83, 84, 85, 87, 88, 89, 90, 93, 94, 97, 98, 99], "graph": [0, 1, 44, 97, 98], "neural": [0, 1, 98], "network": [0, 1, 98], "gnn": [0, 1, 44, 98, 99], "make": [0, 5, 81, 87, 88, 97, 98, 99], "fast": [0, 98, 99], "easi": [0, 98], "train": [0, 1, 39, 40, 77, 81, 83, 87, 88, 90, 96, 98, 99], "complex": [0, 98], "model": [0, 1, 40, 75, 76, 77, 83, 85, 87, 88, 90, 96, 98, 99], "can": [0, 1, 14, 16, 19, 37, 74, 75, 81, 83, 85, 87, 88, 97, 98, 99], "event": [0, 1, 21, 35, 37, 39, 74, 81, 87, 98], "state": [0, 98], "art": [0, 98], "arbitrari": [0, 98], "detector": [0, 1, 24, 44, 98], "configur": [0, 1, 74, 82, 84, 85, 87, 88, 90, 94, 98], "infer": [0, 1, 40, 98, 99], "time": [0, 4, 35, 94, 98, 99], "ar": [0, 1, 4, 5, 16, 29, 31, 34, 37, 39, 74, 81, 87, 88, 97, 98, 99], "order": [0, 27, 98], "magnitud": [0, 98], "faster": [0, 98], "than": [0, 94, 98], "tradit": [0, 98], "techniqu": [0, 98], "common": [0, 1, 85, 87, 88, 90, 91, 93, 98], "ml": [0, 1, 98], "develop": [0, 1, 97, 98, 99], "physicist": [0, 1, 98], "wish": [0, 97, 98], "tool": [0, 1, 98], "research": [0, 98], "By": [0, 37, 98], "unit": [0, 5, 93, 97, 98], "both": [0, 16, 75, 98], "group": [0, 5, 31, 34, 98], "increas": [0, 77, 98], "longev": [0, 98], "usabl": [0, 98], "individu": [0, 5, 98], "code": [0, 24, 35, 87, 88, 98], "contribut": [0, 98, 99], "from": [0, 1, 13, 14, 16, 18, 19, 21, 27, 28, 29, 34, 37, 75, 77, 85, 86, 87, 88, 90, 94, 97, 98, 99], "build": [0, 1, 85, 87, 88, 98], "gener": [0, 5, 16, 98], "reusabl": [0, 98], "softwar": [0, 98], "packag": [0, 1, 38, 89, 92, 93, 97, 98, 99], "base": [0, 4, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 31, 34, 37, 39, 74, 77, 81, 83, 85, 86, 87, 88, 90, 93, 94, 98], "engin": [0, 98], "best": [0, 97, 98], "practic": [0, 97, 98], "lower": [0, 75, 98], "technic": [0, 98], "threshold": [0, 98], "most": [0, 1, 39, 98, 99], "scientif": [0, 1, 98], "problem": [0, 97, 98], "The": [0, 5, 27, 29, 34, 35, 74, 75, 77, 98], "improv": [0, 1, 83, 98], "classif": [0, 1, 44, 68, 98], "yield": [0, 74, 98], "veri": [0, 39, 98], "accur": [0, 98], "e": [0, 1, 5, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 27, 29, 31, 34, 35, 39, 77, 81, 85, 94, 97, 98, 99], "g": [0, 1, 5, 24, 27, 29, 31, 34, 35, 39, 81, 94, 97, 98, 99], "low": [0, 98], "energi": [0, 4, 81, 98], "observ": [0, 98], "icecub": [0, 1, 15, 28, 29, 44, 49, 93, 98, 99], "here": [0, 97, 98, 99], "implement": [0, 1, 5, 14, 30, 31, 33, 34, 97, 98], "wa": [0, 98], "appli": [0, 14, 89, 98], "oscil": [0, 73, 98], "lead": [0, 98], "signific": [0, 98], "angular": [0, 98], "rang": [0, 98], "relev": [0, 1, 29, 38, 92, 97, 98], "studi": [0, 98], "furthermor": [0, 98], "shown": [0, 98], "could": [0, 97, 98], "muon": [0, 18, 98], "v": [0, 98], "therebi": [0, 1, 87, 88, 98], "effici": [0, 98], "puriti": [0, 98], "sampl": [0, 39, 98], "analysi": [0, 98, 99], "similarli": [0, 29, 98], "ha": [0, 5, 29, 31, 34, 35, 92, 98, 99], "great": [0, 98], "point": [0, 23, 98], "analys": [0, 40, 73, 98], "final": [0, 77, 87, 98], "millisecond": [0, 98], "allow": [0, 40, 77, 85, 90, 98, 99], "whole": [0, 98], "new": [0, 1, 34, 85, 90, 
97, 98], "type": [0, 5, 13, 14, 26, 27, 28, 31, 34, 35, 37, 38, 39, 74, 75, 77, 81, 83, 85, 86, 87, 88, 89, 92, 93, 94, 95, 97, 98], "cosmic": [0, 98], "alert": [0, 98], "which": [0, 14, 15, 24, 28, 39, 74, 83, 98, 99], "were": [0, 98], "previous": [0, 98], "unfeas": [0, 98], "possibl": [0, 27, 97, 98], "identifi": [0, 5, 24, 87, 88, 98], "10": [0, 83, 98], "tev": [0, 98], "monitor": [0, 98], "rate": [0, 77, 98], "direct": [0, 98], "real": [0, 98], "thi": [0, 3, 5, 14, 16, 29, 31, 34, 35, 38, 74, 75, 77, 81, 85, 87, 88, 90, 94, 97, 98, 99], "enabl": [0, 3, 98], "first": [0, 77, 85, 90, 97, 98], "ever": [0, 98], "despit": [0, 98], "larg": [0, 98], "background": [0, 98], "origin": [0, 74, 98], "compris": [0, 98], "number": [0, 5, 31, 34, 39, 77, 83, 98], "modul": [0, 3, 29, 40, 73, 76, 82, 84, 87, 88, 89, 90, 93, 98], "necessari": [0, 27, 97, 98], "workflow": [0, 98], "ingest": [0, 1, 3, 98], "raw": [0, 98], "data": [0, 1, 4, 5, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 35, 36, 37, 38, 39, 83, 85, 87, 90, 93, 96, 98, 99], "domain": [0, 1, 3, 40, 98], "specif": [0, 1, 3, 5, 15, 29, 30, 31, 33, 34, 35, 40, 97, 98, 99], "format": [0, 1, 3, 5, 27, 31, 34, 75, 87, 97, 98, 99], "deploi": [0, 1, 40, 98], "chain": [0, 1, 40, 98, 99], "illustr": [0, 97, 98], "figur": [0, 75, 98], "level": [0, 24, 35, 94, 98, 99], "overview": [0, 98], "typic": [0, 27, 98], "convert": [0, 1, 3, 5, 27, 31, 34, 37, 98, 99], "industri": [0, 3, 98], "standard": [0, 3, 4, 5, 31, 34, 39, 83, 97, 98], "intermedi": [0, 1, 3, 5, 31, 34, 98, 99], "file": [0, 1, 3, 5, 14, 27, 31, 34, 37, 38, 74, 77, 83, 84, 85, 86, 87, 88, 92, 94, 98, 99], "read": [0, 3, 27, 98, 99], "simpl": [0, 98], "physic": [0, 1, 14, 28, 29, 40, 98], "orient": [0, 98], "compon": [0, 1, 44, 98], "manag": [0, 14, 76, 98], "experi": [0, 1, 76, 98], "log": [0, 1, 76, 77, 82, 98, 99], "deploy": [0, 1, 41, 96, 98], "modular": [0, 98], "subclass": [0, 98], "torch": [0, 93, 98, 99], "nn": [0, 98], "mean": [0, 5, 31, 34, 88, 98], "onli": [0, 1, 74, 81, 88, 93, 98, 99], "need": [0, 27, 98, 99], "import": [0, 1, 35, 82, 98], "few": [0, 97, 98], "exist": [0, 34, 35, 87, 98], "purpos": [0, 98], "built": [0, 98], "them": [0, 1, 27, 74, 98, 99], "togeth": [0, 98], "form": [0, 85, 90, 98], "complet": [0, 98], "extend": [0, 1, 98], "suit": [0, 98], "through": [0, 98], "layer": [0, 44, 46, 98], "connect": [0, 98], "etc": [0, 94, 98], "optimis": [0, 1, 98], "differ": [0, 14, 97, 98, 99], "track": [0, 14, 18, 97, 98], "These": [0, 97, 98], "prepar": [0, 98], "satisfi": [0, 98], "o": [0, 98], "load": [0, 38, 85, 87, 98], "requir": [0, 20, 35, 87, 88, 90, 98, 99], "when": [0, 5, 27, 31, 34, 35, 94, 97, 98, 99], "batch": [0, 83, 98], "do": [0, 87, 88, 97, 98, 99], "predict": [0, 19, 23, 25, 98], "either": [0, 98, 99], "contain": [0, 5, 27, 28, 31, 34, 81, 83, 98, 99], "imag": [0, 1, 97, 98, 99], "portabl": [0, 98], "depend": [0, 98, 99], "free": [0, 98], "split": [0, 98], "up": [0, 5, 31, 34, 97, 98, 99], "interfac": [0, 73, 87, 88, 98, 99], "block": [0, 1, 98], "pre": [0, 97, 98], "directli": [0, 14, 98], "while": [0, 16, 77, 98], "continu": [0, 98], "expand": [0, 98], "": [0, 5, 14, 27, 34, 37, 77, 81, 83, 87, 88, 94, 95, 98, 99], "capabl": [0, 98], "project": [0, 97, 98], "receiv": [0, 98], "fund": [0, 98], "european": [0, 98], "union": [0, 16, 27, 29, 87, 90, 92, 98], "horizon": [0, 98], "2020": [0, 98], "innov": [0, 98], "programm": [0, 98], "under": [0, 98], "mari": [0, 98], "sk\u0142odowska": [0, 98], "curi": [0, 98], "grant": [0, 
98], "agreement": [0, 97, 98], "No": [0, 98], "890778": [0, 98], "work": [0, 4, 28, 97, 98, 99], "rasmu": [0, 98], "\u00f8rs\u00f8e": [0, 98], "partli": [0, 98], "punch4nfdi": [0, 98], "consortium": [0, 98], "support": [0, 29, 97, 98, 99], "dfg": [0, 98], "nfdi": [0, 98], "39": [0, 98, 99], "1": [0, 5, 27, 31, 34, 39, 77, 81, 87, 98, 99], "germani": [0, 98], "conveni": [1, 97, 99], "collabor": 1, "solv": [1, 97], "It": [1, 27, 35, 97], "leverag": 1, "advanc": 1, "machin": [1, 99], "learn": [1, 77, 99], "without": [1, 74, 99], "have": [1, 5, 16, 31, 34, 35, 39, 97, 99], "expert": 1, "themselv": [1, 87, 88], "acceler": 1, "area": 1, "phyic": 1, "design": 1, "principl": 1, "all": [1, 5, 14, 16, 31, 34, 35, 85, 86, 87, 88, 89, 90, 94, 97, 99], "streamlin": 1, "process": [1, 5, 14, 97, 99], "transform": [1, 81], "extens": [1, 92], "basic": 1, "across": [1, 2, 29, 36, 82, 83, 84, 94], "variou": 1, "easili": 1, "architectur": 1, "main": [1, 97, 99], "featur": [1, 3, 4, 5, 15, 87, 97], "i3": [1, 5, 14, 28, 29, 31, 34, 38, 92, 99], "more": [1, 35, 38, 85, 87, 88, 90, 94], "index": [1, 5, 29, 35, 77], "sqlite": [1, 3, 7, 34, 35, 37, 99], "suitabl": 1, "plug": 1, "plai": 1, "abstract": [1, 5, 86], "awai": 1, "detail": [1, 99], "expos": 1, "physicst": 1, "what": [1, 97], "i3modul": [1, 40], "includ": [1, 74, 85, 97], "docker": 1, "run": [1, 37], "containeris": 1, "fashion": 1, "subpackag": [1, 3, 7, 13, 40, 44, 59, 82], "dataset": [1, 3, 18, 39, 83, 87], "extractor": [1, 3, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34], "parquet": [1, 3, 7, 31, 37, 99], "util": [1, 3, 13, 27, 28, 29, 35, 37, 38, 39, 44, 76, 83, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96], "constant": [1, 3, 96], "dataconvert": [1, 3, 31, 34], "dataload": [1, 3, 90], "pipelin": [1, 3], "coarsen": [1, 44], "standard_model": [1, 44], "pisa": [1, 20, 74, 75, 93, 96, 99], "fit": [1, 73, 75, 81, 90], "plot": [1, 73], "callback": [1, 76], "label": [1, 18, 21, 75, 76], "loss_funct": [1, 76], "weight_fit": [1, 76], "config": [1, 39, 74, 82, 83, 85, 86, 87, 88, 89, 90], "argpars": [1, 82], "decor": [1, 5, 82, 93], "filesi": [1, 82], "math": [1, 82], "submodul": [1, 3, 7, 9, 11, 13, 26, 30, 33, 36, 41, 44, 46, 49, 53, 59, 60, 64, 68, 73, 76, 82, 84, 89], "global": [2, 4], "i3extractor": [3, 5, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 31, 34], "i3featureextractor": [3, 4, 13, 34], "i3genericextractor": [3, 13, 34], "i3hybridrecoextractor": [3, 13], "i3ntmuonlabelsextractor": [3, 13], "i3particleextractor": [3, 13], "i3pisaextractor": [3, 13], "i3quesoextractor": [3, 13], "i3retroextractor": [3, 13], "i3splinempeextractor": [3, 13], "i3truthextractor": [3, 4, 13], "i3tumextractor": [3, 13], "parquet_dataconvert": [3, 30], "sqlite_dataconvert": [3, 33], "sqlite_util": [3, 33], "parquet_to_sqlit": [3, 36], "random": [3, 36, 39, 87], "string_selection_resolv": [3, 36], "truth": [3, 4, 15, 24, 35, 81, 87], "fileset": [3, 5], "init_global_index": [3, 5], "cache_output_fil": [3, 5], "class": [4, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 29, 30, 31, 33, 34, 37, 39, 74, 77, 81, 83, 85, 86, 87, 88, 89, 90, 94, 97], "object": [4, 5, 14, 16, 27, 29, 74, 83, 94], "namespac": [4, 87, 88], "name": [4, 5, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31, 34, 35, 37, 74, 81, 83, 85, 87, 88, 89, 90, 94, 97, 99], "icecube86": 4, "dom_x": 4, "dom_i": 4, "dom_z": 4, "dom_tim": 4, "charg": 4, "rde": 4, "pmt_area": 4, "deepcor": [4, 15], "upgrad": [4, 15, 99], "string": [4, 5, 27, 31, 34, 39, 85], "pmt_number": 4, 
"dom_numb": 4, "pmt_dir_x": 4, "pmt_dir_i": 4, "pmt_dir_z": 4, "dom_typ": 4, "prometheu": [4, 44, 49], "sensor_pos_x": 4, "sensor_pos_i": 4, "sensor_pos_z": 4, "t": [4, 29, 35, 75, 77, 99], "kaggl": 4, "x": [4, 5, 24, 31, 34, 75, 81], "y": [4, 24, 75, 99], "z": [4, 5, 24, 31, 34, 99], "auxiliari": 4, "energy_track": 4, "position_x": 4, "position_i": 4, "position_z": 4, "azimuth": 4, "zenith": 4, "pid": [4, 39, 87], "elast": 4, "sim_typ": 4, "interaction_typ": 4, "interaction_tim": 4, "inelast": 4, "stopped_muon": 4, "injection_energi": 4, "injection_typ": 4, "injection_interaction_typ": 4, "injection_zenith": 4, "injection_azimuth": 4, "injection_bjorkenx": 4, "injection_bjorkeni": 4, "injection_position_x": 4, "injection_position_i": 4, "injection_position_z": 4, "injection_column_depth": 4, "primary_lepton_1_typ": 4, "primary_hadron_1_typ": 4, "primary_lepton_1_position_x": 4, "primary_lepton_1_position_i": 4, "primary_lepton_1_position_z": 4, "primary_hadron_1_position_x": 4, "primary_hadron_1_position_i": 4, "primary_hadron_1_position_z": 4, "primary_lepton_1_direction_theta": 4, "primary_lepton_1_direction_phi": 4, "primary_hadron_1_direction_theta": 4, "primary_hadron_1_direction_phi": 4, "primary_lepton_1_energi": 4, "primary_hadron_1_energi": 4, "total_energi": 4, "i3_fil": [5, 14], "str": [5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 34, 35, 37, 38, 39, 74, 81, 83, 85, 86, 87, 88, 89, 90, 92, 94], "gcd_file": [5, 14], "paramet": [5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 34, 35, 37, 38, 39, 74, 75, 77, 81, 83, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95], "output_fil": [5, 31, 34], "global_index": 5, "avail": [5, 16, 93], "pool": [5, 44, 46], "worker": [5, 31, 34, 38, 83, 94], "return": [5, 14, 27, 28, 29, 31, 34, 35, 37, 38, 39, 74, 75, 77, 81, 83, 85, 86, 87, 88, 89, 92, 93, 94, 95], "none": [5, 14, 16, 24, 28, 29, 31, 34, 35, 37, 39, 74, 77, 81, 83, 85, 86, 87, 89, 92, 94], "synchron": 5, "list": [5, 14, 16, 24, 27, 29, 31, 34, 35, 37, 38, 39, 75, 77, 81, 87, 89, 90, 92, 94], "process_method": 5, "cach": 5, "output": [5, 31, 34, 37, 74, 81, 87, 88, 99], "typevar": 5, "f": 5, "bound": [5, 75], "callabl": [5, 29, 81, 85, 87, 88, 89, 93], "ani": [5, 27, 28, 29, 31, 34, 75, 81, 83, 85, 86, 87, 88, 89, 90, 94, 99], "outdir": [5, 31, 34, 37, 74], "gcd_rescu": [5, 31, 34, 92], "nb_files_to_batch": [5, 31, 34], "sequential_batch_pattern": [5, 31, 34], "input_file_batch_pattern": [5, 31, 34], "index_column": [5, 31, 34, 35, 39, 74, 81, 87], "icetray_verbos": [5, 31, 34], "abc": [5, 14, 81, 86, 87, 88], "logger": [5, 14, 37, 39, 81, 82, 94, 99], "construct": [5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 31, 34, 37, 39, 74, 77, 81, 83, 86, 87, 88, 94], "regular": [5, 29, 31, 34], "express": [5, 31, 34], "accord": [5, 31, 34], "match": [5, 31, 34, 81, 92, 95], "certain": [5, 31, 34, 37, 74], "pattern": [5, 31, 34], "wildcard": [5, 31, 34], "same": [5, 29, 31, 34, 35, 77, 89, 94], "input": [5, 31, 34, 85, 90], "replac": [5, 31, 34, 85, 87, 88, 90], "period": [5, 31, 34], "special": [5, 16, 31, 34], "interpret": [5, 31, 34], "liter": [5, 31, 34], "charact": [5, 31, 34], "regex": [5, 31, 34], "For": [5, 29, 31, 34, 77], "instanc": [5, 14, 24, 29, 31, 34, 74, 86, 88, 99], "A": [5, 31, 34, 74, 81, 99], "_": [5, 31, 34], "0": [5, 31, 34, 39, 74, 75, 87], "9": [5, 31, 34], "5": [5, 31, 34, 39, 83, 99], "zst": [5, 31, 34], "find": [5, 31, 34, 92], "whose": [5, 31, 34], "one": [5, 31, 34, 35, 87, 88, 92, 97, 99], "capit": [5, 31, 34], "letter": [5, 
31, 34], "follow": [5, 31, 34, 81, 97, 99], "underscor": [5, 31, 34], "five": [5, 31, 34], "upgrade_genie_step4_141020_a_000000": [5, 31, 34], "upgrade_genie_step4_141020_a_000001": [5, 31, 34], "upgrade_genie_step4_141020_a_000008": [5, 31, 34], "upgrade_genie_step4_141020_a_000009": [5, 31, 34], "would": [5, 31, 34, 97], "upgrade_genie_step4_141020_a_00000x": [5, 31, 34], "suffix": [5, 31, 34], "upgrade_genie_step4_141020_a_000010": [5, 31, 34], "separ": [5, 27, 31, 34, 77, 99], "upgrade_genie_step4_141020_a_00001x": [5, 31, 34], "int": [5, 18, 21, 31, 34, 39, 74, 77, 81, 83, 87, 90, 94], "properti": [5, 14, 19, 29, 86, 94], "file_suffix": [5, 31, 34], "execut": [5, 35], "method": [5, 14, 26, 27, 28, 29, 31, 34, 81], "set": [5, 16, 97], "inherit": [5, 14, 29, 94], "path": [5, 35, 38, 74, 75, 83, 85, 86, 87, 92, 99], "correspond": [5, 27, 29, 34, 38, 81, 92, 99], "gcd": [5, 14, 28, 38, 92], "save_data": [5, 31, 34], "save": [5, 14, 27, 31, 34, 35, 74, 81, 85, 86, 87, 88, 99], "ordereddict": [5, 31, 34], "extract": [5, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 28, 34, 37, 38], "merge_fil": [5, 31, 34], "input_fil": [5, 31, 34], "merg": [5, 31, 34, 99], "result": [5, 31, 34, 77, 89, 99], "option": [5, 24, 31, 34, 74, 75, 81, 82, 83, 85, 87, 92, 99], "default": [5, 16, 24, 27, 31, 34, 35, 37, 74, 75, 77, 81, 83, 85, 87, 92], "current": [5, 31, 34, 39, 77, 97, 99], "rais": [5, 16, 31, 85, 90], "notimplementederror": [5, 31], "If": [5, 16, 31, 34, 74, 77, 81, 97, 99], "been": [5, 31, 97], "backend": [5, 31, 34], "question": 5, "get_map_funct": 5, "nb_file": 5, "map": [5, 15, 16, 34, 35, 85, 87, 88, 90], "pure": [5, 13, 14, 16, 29], "multiprocess": [5, 99], "tupl": [5, 28, 29, 74, 75, 83], "parquet_dataset": [7, 9], "sqlite_dataset": [7, 11], "collect": [13, 14, 26, 95], "i3fram": [13, 14, 16, 28, 29], "frame": [13, 14, 16, 26, 29, 34], "i3extractorcollect": [13, 14], "i3featureextractoricecube86": [13, 15], "i3featureextractoricecubedeepcor": [13, 15], "i3featureextractoricecubeupgrad": [13, 15], "i3pulsenoisetruthflagicecubeupgrad": [13, 15], "i3galacticplanehybridrecoextractor": [13, 17], "i3ntmuonlabelextractor": [13, 18], "i3splinempeicextractor": [13, 23], "inform": [14, 16, 24, 75], "should": [14, 27, 39, 85, 87, 88, 90, 97, 99], "__call__": 14, "icetrai": [14, 28, 29, 93], "keep": 14, "proven": 14, "tabl": [14, 34, 35, 74, 81], "set_fil": 14, "store": [14, 35, 74], "refer": [14, 87], "being": 14, "get": [14, 28, 77, 99], "multipl": [14, 77, 87, 94], "treat": 14, "singl": [14, 87, 88], "pulsemap": [15, 34, 87], "puls": [15, 16, 28, 29, 34, 35], "seri": [15, 16, 28, 29, 35], "86": 15, "nois": [15, 28], "flag": 15, "ad": [15, 74], "kei": [16, 27, 28, 29, 34, 35, 87, 88], "exclude_kei": 16, "dynam": 16, "pars": [16, 75, 82, 83, 84, 85, 90], "call": [16, 29, 34, 74, 81, 94], "tri": [16, 29], "automat": [16, 97], "cast": [16, 29], "done": [16, 94, 97], "recurs": [16, 29, 89, 92], "each": [16, 27, 29, 35, 37, 38, 74, 75, 77, 92], "look": [16, 99], "member": [16, 29, 87, 88, 94], "variabl": [16, 29, 81, 94], "signatur": [16, 29], "similar": [16, 29, 99], "dict": [16, 27, 29, 34, 74, 75, 77, 83, 85, 87, 88, 89, 90], "handl": [16, 83, 94], "hand": 16, "case": [16, 99], "per": [16, 35, 81], "mc": [16, 34, 35], "tree": [16, 34], "trigger": 16, "exclud": [16, 37, 99], "valueerror": 16, "hybrid": 17, "galatict": 17, "plane": 17, "tum": [18, 25], "dnn": [18, 25], "padding_valu": [18, 21], "northeren": 18, "i3particl": 19, "other": [19, 35, 97], "algorithm": 19, "comparison": 19, "quantiti": 
20, "select": [21, 39, 81, 87, 97], "queso": 21, "retro": 22, "splinemp": 23, "border": 24, "mctree": [24, 28], "ndarrai": [24, 81], "arrai": [24, 27], "boundari": 24, "volum": 24, "coordin": 24, "particl": [24, 35], "start": [24, 97, 99], "stop": [24, 83], "within": 24, "hard": 24, "i3mctre": 24, "valu": [24, 27, 34, 35, 75, 83, 85], "flatten_nested_dictionari": [26, 27], "serialis": [26, 27], "transpose_list_of_dict": [26, 27], "frame_is_montecarlo": [26, 28], "frame_is_nois": [26, 28], "get_om_keys_and_pulseseri": [26, 28], "is_boost_enum": [26, 29], "is_boost_class": [26, 29], "is_icecube_class": [26, 29], "is_typ": [26, 29], "is_method": [26, 29], "break_cyclic_recurs": [26, 29], "get_member_vari": [26, 29], "cast_object_to_pure_python": [26, 29], "cast_pulse_series_to_pure_python": [26, 29], "manipul": 27, "obj": [27, 29, 89], "parent_kei": 27, "flatten": 27, "nest": 27, "dictionari": [27, 28, 29, 34, 74, 75, 85, 87, 88, 90], "non": [27, 29, 34, 35], "exampl": [27, 39, 87, 88, 99], "d": [27, 97], "b": 27, "c": [27, 99], "2": [27, 74, 75, 87, 99], "a__b": 27, "applic": 27, "combin": [27, 87], "parent": 27, "__": [27, 29], "concaten": 27, "nester": 27, "json": [27, 87], "therefor": 27, "we": [27, 29, 39, 97, 99], "element": [27, 29, 89], "outer": 27, "abl": [27, 99], "de": 27, "transpos": 27, "check": [28, 29, 34, 35, 83, 92, 93, 97, 99], "whether": [28, 29, 34, 35, 89, 92, 93], "mont": 28, "carlo": 28, "simul": 28, "bool": [28, 29, 34, 35, 39, 74, 77, 81, 83, 89, 92, 93, 94], "pulseseri": 28, "calibr": [28, 29], "indici": [28, 39], "gcd_dict": [28, 29], "p": [28, 34], "om": [28, 29], "dataclass": 28, "i3calibr": 28, "indicesfor": 28, "boost": 29, "enum": 29, "fn": [29, 85, 89], "ensur": [29, 38, 94, 97, 99], "isn": 29, "return_discard": 29, "valid": [29, 39, 83, 85, 90], "ignor": 29, "mangl": 29, "take": [29, 34, 97], "mainli": 29, "cannot": [29, 85, 90], "trivial": 29, "doe": [29, 88], "try": 29, "length": [29, 77], "equival": 29, "its": 29, "like": [29, 95, 97], "otherwis": 29, "itself": 29, "deem": 29, "wai": [29, 39, 97, 99], "represent": 29, "optic": 29, "found": 29, "parquetdataconvert": [30, 31], "sqlitedataconvert": [33, 34, 99], "construct_datafram": [33, 34], "is_pulse_map": [33, 34], "is_mc_tre": [33, 34], "database_exist": [33, 35], "database_table_exist": [33, 35], "run_sql_cod": [33, 35], "save_to_sql": [33, 35], "attach_index": [33, 35], "create_t": [33, 35], "create_table_and_save_to_sql": [33, 35], "db": 34, "databas": [34, 35, 37, 74, 81, 99], "max_table_s": 34, "maximum": [34, 83], "row": [34, 35], "given": [34, 81, 83], "exce": 34, "limit": 34, "creat": [34, 35, 85, 86, 90, 97, 99], "any_pulsemap_is_non_empti": 34, "data_dict": 34, "empti": 34, "retriev": 34, "splitinicepuls": 34, "least": [34, 97, 99], "true": [34, 35, 74, 77, 81, 87, 88, 90], "becaus": [34, 38], "instead": [34, 85, 90], "alwai": 34, "panda": [34, 39, 81], "datafram": [34, 35, 39, 74, 81], "table_nam": [34, 35], "database_path": [35, 74, 81], "df": 35, "must": [35, 77, 81, 97], "alreadi": [35, 99], "attach": 35, "queri": [35, 39], "column": [35, 74, 81], "default_typ": 35, "null": 35, "integer_primary_kei": 35, "event_no": [35, 39, 81, 87], "NOT": 35, "integ": 35, "primari": 35, "Such": 35, "uniqu": [35, 37, 87], "appropri": 35, "expect": [35, 39], "doesn": 35, "parquettosqliteconvert": [36, 37], "pairwise_shuffl": [36, 38], "stringselectionresolv": [36, 39], "parquet_path": 37, "mc_truth_tabl": 37, "excluded_field": 37, "assign": [37, 97], "id": 37, "everi": [37, 99], "field": [37, 75, 85, 87, 
88, 90], "One": [37, 75], "choos": 37, "argument": [37, 81, 83, 85, 87, 88, 90], "exclude_field": 37, "database_nam": 37, "convers": [37, 99], "directori": [37, 74, 92], "rng": 38, "relat": [38, 92], "i3_list": [38, 92], "gcd_list": [38, 92], "shuffl": 38, "correpond": 38, "handi": 38, "even": 38, "files_list": 38, "gcd_shuffl": 38, "i3_shuffl": 38, "resolv": 39, "indic": [39, 77, 83, 97], "seed": [39, 87], "use_cach": 39, "datasetconfig": [39, 84, 87], "flexibl": 39, "defin": [39, 85, 87, 88, 90], "below": [39, 75, 81, 97, 99], "show": [39, 77], "involv": 39, "cover": 39, "yml": [39, 83, 87, 88], "test": [39, 87, 93, 97], "50000": [39, 87], "ab": [39, 87], "12": [39, 87], "14": [39, 87], "16": [39, 87], "13": [39, 99], "10000": 39, "compat": 39, "syntax": 39, "mai": [39, 99], "also": [39, 87], "specifi": [39, 75, 77, 99], "fix": 39, "randomli": [39, 88], "20": [39, 94], "graphnet_modul": [40, 41], "convnet": [44, 53], "dynedg": [44, 53], "dynedge_jinst": [44, 53], "dynedge_kaggle_tito": [44, 53], "edg": [44, 59], "node": [44, 59], "graph_definit": [44, 59, 87], "config_updat": [73, 74], "weightfitt": [73, 74, 76, 81], "contourfitt": [73, 74], "read_entri": [73, 75], "plot_2d_contour": [73, 75], "plot_1d_contour": [73, 75], "contour": [74, 75], "config_path": 74, "new_config_path": 74, "dummy_sect": 74, "updat": [74, 77], "temp": 74, "dummi": 74, "section": 74, "header": 74, "configupdat": 74, "programat": 74, "truth_tabl": [74, 81, 87], "statistical_fit": 74, "weight": [74, 81, 88, 99], "fit_weight": [74, 81], "config_outdir": 74, "weight_nam": [74, 81], "pisa_config_dict": 74, "add_to_databas": [74, 81], "flux": 74, "self": [74, 85, 90], "_database_path": 74, "statist": 74, "effect": [74, 77, 97], "account": 74, "systemat": 74, "hypersurfac": 74, "chang": [74, 97], "assumpt": 74, "regard": 74, "fals": [74, 77, 81, 87], "two": [74, 77], "pipeline_path": 74, "post_fix": 74, "model_nam": 74, "include_retro": 74, "fit_1d_contour": 74, "run_nam": 74, "config_dict": 74, "grid_siz": 74, "n_worker": 74, "theta23_minmax": 74, "36": 74, "54": 74, "dm31_minmax": 74, "3": [74, 75, 97, 99], "7": 74, "1d": [74, 75], "float": [74, 75, 77, 87], "fit_2d_contour": 74, "2d": [74, 75], "entri": [75, 83], "content": 75, "contour_data": 75, "xlim": 75, "4": 75, "6": 75, "ylim": 75, "0023799999999999997": 75, "0025499999999999997": 75, "chi2_critical_valu": 75, "width": 75, "height": 75, "path_to_pisa_fit_result": 75, "name_of_my_model_in_fit": 75, "legend": 75, "color": 75, "linestyl": 75, "style": [75, 97], "line": [75, 77, 83], "upper": 75, "axi": 75, "605": 75, "critic": [75, 94], "chi2": 75, "90": 75, "cl": 75, "note": [75, 88], "right": 75, "176": 75, "inch": 75, "388": 75, "706": 75, "abov": [75, 81, 99], "352": 75, "piecewiselinearlr": [76, 77], "progressbar": [76, 77], "uniform": [76, 81], "bjoernlow": [76, 81], "dure": 77, "optim": 77, "mileston": 77, "factor": 77, "last_epoch": 77, "verbos": 77, "_lrschedul": 77, "interpol": 77, "linearli": 77, "between": [77, 87, 88], "denot": 77, "step": 77, "multipli": 77, "closest": 77, "befor": 77, "vice": 77, "versa": 77, "after": [77, 83, 87], "last": 77, "wrap": [77, 87, 88], "epoch": [77, 83], "print": [77, 94], "messag": [77, 94], "stdout": 77, "get_lr": 77, "refresh_r": 77, "process_posit": 77, "tqdmprogressbar": 77, "custom": 77, "progress": 77, "bar": 77, "customis": 77, "pytorch": [77, 99], "lightn": 77, "init_validation_tqdm": 77, "overrid": 77, "init_predict_tqdm": 77, "init_test_tqdm": 77, "init_train_tqdm": 77, "get_metr": 77, "trainer": 77, 
"version": [77, 97, 99], "lightningmodul": [77, 94], "on_train_epoch_start": 77, "previou": 77, "see": [77, 97, 99], "loss": [77, 83], "metric": 77, "behaviour": 77, "overwrit": 77, "on_train_epoch_end": 77, "don": [77, 99], "duplciat": 77, "produc": 81, "public": 81, "uniformweightfitt": 81, "bin": 81, "kwarg": [81, 85, 87, 88, 94], "privat": 81, "_fit_weight": 81, "sql": 81, "desir": [81, 92], "space": 81, "np": 81, "log10": 81, "happen": 81, "addit": 81, "pass": [81, 97], "distribut": 81, "x_low": 81, "wherea": 81, "curv": 81, "base_config": [82, 84], "dataset_config": [82, 84], "model_config": [82, 84, 85, 87, 90], "training_config": [82, 84], "argumentpars": [82, 83], "is_gcd_fil": [82, 92], "is_i3_fil": [82, 92], "has_extens": [82, 92], "find_i3_fil": [82, 92], "has_icecube_packag": [82, 93], "has_torch_packag": [82, 93], "has_pisa_packag": [82, 93], "requires_icecub": [82, 93], "repeatfilt": [82, 94], "eps_lik": [82, 95], "consist": [83, 94, 97], "cli": 83, "present": [83, 92, 93], "pop_default": 83, "remov": 83, "usag": 83, "descript": 83, "command": [83, 99], "standard_argu": 83, "size": 83, "128": 83, "help": [83, 97], "home": [83, 99], "runner": 83, "local": 83, "lib": [83, 99], "python3": 83, "training_example_data_sqlit": 83, "earli": 83, "patienc": 83, "gpu": [83, 99], "narg": 83, "max": 83, "50": 83, "example_energy_reconstruction_model": 83, "num": 83, "fetch": 83, "with_standard_argu": 83, "arg": [83, 85, 90, 94], "add": [83, 97, 99], "overwritten": [83, 85], "baseconfig": [84, 85, 86, 87, 88, 90], "get_all_argument_valu": [84, 85], "save_dataset_config": [84, 87], "datasetconfigsavermeta": [84, 87], "datasetconfigsaverabcmeta": [84, 87], "modelconfig": [84, 87, 88], "save_model_config": [84, 88], "modelconfigsavermeta": [84, 88], "modelconfigsaverabc": [84, 88], "traverse_and_appli": [84, 89], "list_all_submodul": [84, 89], "get_all_grapnet_class": [84, 89], "is_graphnet_modul": [84, 89], "is_graphnet_class": [84, 89], "get_graphnet_class": [84, 89], "trainingconfig": [84, 90], "basemodel": [85, 87, 88], "keyword": [85, 90], "validationerror": [85, 90], "pydantic_cor": [85, 90], "__init__": [85, 87, 88, 90, 99], "__pydantic_self__": [85, 90], "classmethod": [85, 86], "dump": [85, 87, 88], "yaml": [85, 86], "as_dict": [85, 87, 88], "repres": [85, 87, 88], "classvar": [85, 87, 88, 90], "configdict": [85, 87, 88, 90], "conform": [85, 87, 88, 90], "pydant": [85, 87, 88, 90], "model_field": [85, 87, 88, 90], "fieldinfo": [85, 87, 88, 90], "metadata": [85, 87, 88, 90], "about": [85, 87, 88, 90], "__fields__": [85, 87, 88, 90], "v1": [85, 87, 88, 90, 99], "re": [86, 99], "save_config": 86, "from_config": [86, 87, 88], "node_truth": 87, "node_truth_t": 87, "string_select": 87, "loss_weight_t": 87, "loss_weight_column": 87, "loss_weight_default_valu": 87, "dataconfig": 87, "transpar": [87, 88, 97], "reproduc": [87, 88], "In": [87, 88, 99], "session": [87, 88], "anoth": [87, 88], "you": [87, 88, 97, 99], "ensembledataset": 87, "still": 87, "csv": 87, "train_select": 87, "test_select": 87, "unambigu": [87, 88], "annot": [87, 88, 90], "nonetyp": 87, "init_fn": [87, 88], "metaclass": [87, 88], "abcmeta": [87, 88], "datasetconfigsav": 87, "class_nam": [88, 94], "trainabl": 88, "hyperparamet": 88, "instanti": 88, "initialis": 88, "thu": 88, "modelconfigsav": 88, "fn_kwarg": 89, "structur": 89, "moduletyp": 89, "grapnet": 89, "lookup": 89, "target": 90, "early_stopping_pati": 90, "system": [92, 99], "filenam": 92, "dir": 92, "search": 92, "test_funct": 93, "filter": 94, "out": [94, 97, 
99], "repeat": 94, "nb_repeats_allow": 94, "record": 94, "logrecord": 94, "log_fold": 94, "clear": 94, "intuit": 94, "composit": 94, "rather": 94, "loggeradapt": 94, "chosen": 94, "avoid": [94, 97], "clash": 94, "pytorch_lightn": 94, "setlevel": 94, "deleg": 94, "msg": 94, "error": [94, 97], "warn": 94, "info": [94, 99], "debug": 94, "warning_onc": 94, "exactli": 94, "onc": 94, "handler": 94, "file_handl": 94, "filehandl": 94, "stream_handl": 94, "streamhandl": 94, "assort": 95, "tensor": 95, "ep": 95, "dtype": 95, "api": 96, "To": [97, 99], "sure": [97, 99], "smooth": 97, "guidelin": 97, "guid": 97, "encourag": 97, "contributor": 97, "discuss": 97, "bug": 97, "anyth": 97, "place": 97, "describ": 97, "altern": 97, "yourself": 97, "ownership": 97, "particular": 97, "activ": [97, 99], "prioriti": 97, "situat": 97, "lot": 97, "effort": 97, "go": 97, "turn": 97, "outsid": 97, "scope": 97, "solut": 97, "better": 97, "fork": 97, "repo": 97, "dedic": 97, "branch": [97, 99], "your": [97, 99], "repositori": 97, "graphdefinit": 97, "euclidean": 97, "definit": 97, "own": [97, 99], "team": 97, "accept": 97, "autom": 97, "review": 97, "pep8": 97, "docstr": 97, "googl": 97, "hint": 97, "clean": [97, 99], "8": [97, 99], "adher": 97, "pep": 97, "pylint": 97, "flake8": 97, "black": 97, "well": 97, "recommend": [97, 99], "mypi": 97, "pydocstyl": 97, "docformatt": 97, "commit": 97, "hook": 97, "instal": 97, "come": 97, "tag": [97, 99], "pip": [97, 99], "Then": 97, "everytim": 97, "pep257": 97, "static": 97, "concept": 97, "http": 97, "ljvmiranda921": 97, "io": 97, "notebook": 97, "2018": 97, "06": 97, "21": 97, "precommit": 97, "environ": 99, "virtual": 99, "anaconda": 99, "prove": 99, "instruct": 99, "setup": 99, "want": 99, "part": 99, "runtim": 99, "achiev": 99, "bash": 99, "shell": 99, "eval": 99, "cvmf": 99, "opensciencegrid": 99, "org": 99, "py3": 99, "v4": 99, "sh": 99, "rhel_7_x86_64": 99, "metaproject": 99, "env": 99, "alia": 99, "script": 99, "With": 99, "now": 99, "light": 99, "extra": 99, "geometr": 99, "just": 99, "won": 99, "later": 99, "r": 99, "torch_cpu": 99, "txt": 99, "cpu": 99, "torch_gpu": 99, "prefer": 99, "unix": 99, "git": 99, "clone": 99, "github": 99, "com": 99, "usernam": 99, "cd": 99, "conda": 99, "gcc_linux": 99, "64": 99, "gxx_linux": 99, "libgcc": 99, "cudatoolkit": 99, "11": 99, "forg": 99, "torch_maco": 99, "On": 99, "maco": 99, "box": 99, "compil": 99, "gcc": 99, "date": 99, "possibli": 99, "cuda": 99, "toolkit": 99, "recent": 99, "omit": 99, "newer": 99, "export": 99, "ld_library_path": 99, "anaconda3": 99, "miniconda3": 99, "bashrc": 99, "librari": 99, "access": 99, "so": 99, "intend": 99, "consid": 99, "rm": 99, "asogaard": 99, "latest": 99, "dc423315742c": 99, "01_icetrai": 99, "01_convert_i3_fil": 99, "py": 99, "2023": 99, "01": 99, "24": 99, "41": 99, "27": 99, "write": 99, "graphnet_20230124": 99, "134127": 99, "46": 99, "root": 99, "convert_i3_fil": 99, "ic86": 99, "thread": 99, "100": 99, "00": 99, "79": 99, "42": 99, "26": 99, "413": 99, "88it": 99, "specialis": 99, "ones": 99, "push": 99, "vx": 99}, "objects": {"": [[1, 0, 0, "-", "graphnet"]], "graphnet": [[2, 0, 0, "-", "constants"], [3, 0, 0, "-", "data"], [40, 0, 0, "-", "deployment"], [73, 0, 0, "-", "pisa"], [76, 0, 0, "-", "training"], [82, 0, 0, "-", "utilities"]], "graphnet.data": [[4, 0, 0, "-", "constants"], [5, 0, 0, "-", "dataconverter"], [13, 0, 0, "-", "extractors"], [30, 0, 0, "-", "parquet"], [33, 0, 0, "-", "sqlite"], [36, 0, 0, "-", "utilities"]], "graphnet.data.constants": [[4, 1, 1, "", 
"FEATURES"], [4, 1, 1, "", "TRUTH"]], "graphnet.data.constants.FEATURES": [[4, 2, 1, "", "DEEPCORE"], [4, 2, 1, "", "ICECUBE86"], [4, 2, 1, "", "KAGGLE"], [4, 2, 1, "", "PROMETHEUS"], [4, 2, 1, "", "UPGRADE"]], "graphnet.data.constants.TRUTH": [[4, 2, 1, "", "DEEPCORE"], [4, 2, 1, "", "ICECUBE86"], [4, 2, 1, "", "KAGGLE"], [4, 2, 1, "", "PROMETHEUS"], [4, 2, 1, "", "UPGRADE"]], "graphnet.data.dataconverter": [[5, 1, 1, "", "DataConverter"], [5, 1, 1, "", "FileSet"], [5, 5, 1, "", "cache_output_files"], [5, 5, 1, "", "init_global_index"]], "graphnet.data.dataconverter.DataConverter": [[5, 3, 1, "", "execute"], [5, 4, 1, "", "file_suffix"], [5, 3, 1, "", "get_map_function"], [5, 3, 1, "", "merge_files"], [5, 3, 1, "", "save_data"]], "graphnet.data.dataconverter.FileSet": [[5, 2, 1, "", "gcd_file"], [5, 2, 1, "", "i3_file"]], "graphnet.data.extractors": [[14, 0, 0, "-", "i3extractor"], [15, 0, 0, "-", "i3featureextractor"], [16, 0, 0, "-", "i3genericextractor"], [17, 0, 0, "-", "i3hybridrecoextractor"], [18, 0, 0, "-", "i3ntmuonlabelsextractor"], [19, 0, 0, "-", "i3particleextractor"], [20, 0, 0, "-", "i3pisaextractor"], [21, 0, 0, "-", "i3quesoextractor"], [22, 0, 0, "-", "i3retroextractor"], [23, 0, 0, "-", "i3splinempeextractor"], [24, 0, 0, "-", "i3truthextractor"], [25, 0, 0, "-", "i3tumextractor"], [26, 0, 0, "-", "utilities"]], "graphnet.data.extractors.i3extractor": [[14, 1, 1, "", "I3Extractor"], [14, 1, 1, "", "I3ExtractorCollection"]], "graphnet.data.extractors.i3extractor.I3Extractor": [[14, 4, 1, "", "name"], [14, 3, 1, "", "set_files"]], "graphnet.data.extractors.i3extractor.I3ExtractorCollection": [[14, 3, 1, "", "set_files"]], "graphnet.data.extractors.i3featureextractor": [[15, 1, 1, "", "I3FeatureExtractor"], [15, 1, 1, "", "I3FeatureExtractorIceCube86"], [15, 1, 1, "", "I3FeatureExtractorIceCubeDeepCore"], [15, 1, 1, "", "I3FeatureExtractorIceCubeUpgrade"], [15, 1, 1, "", "I3PulseNoiseTruthFlagIceCubeUpgrade"]], "graphnet.data.extractors.i3genericextractor": [[16, 1, 1, "", "I3GenericExtractor"]], "graphnet.data.extractors.i3hybridrecoextractor": [[17, 1, 1, "", "I3GalacticPlaneHybridRecoExtractor"]], "graphnet.data.extractors.i3ntmuonlabelsextractor": [[18, 1, 1, "", "I3NTMuonLabelExtractor"]], "graphnet.data.extractors.i3particleextractor": [[19, 1, 1, "", "I3ParticleExtractor"]], "graphnet.data.extractors.i3pisaextractor": [[20, 1, 1, "", "I3PISAExtractor"]], "graphnet.data.extractors.i3quesoextractor": [[21, 1, 1, "", "I3QUESOExtractor"]], "graphnet.data.extractors.i3retroextractor": [[22, 1, 1, "", "I3RetroExtractor"]], "graphnet.data.extractors.i3splinempeextractor": [[23, 1, 1, "", "I3SplineMPEICExtractor"]], "graphnet.data.extractors.i3truthextractor": [[24, 1, 1, "", "I3TruthExtractor"]], "graphnet.data.extractors.i3tumextractor": [[25, 1, 1, "", "I3TUMExtractor"]], "graphnet.data.extractors.utilities": [[27, 0, 0, "-", "collections"], [28, 0, 0, "-", "frames"], [29, 0, 0, "-", "types"]], "graphnet.data.extractors.utilities.collections": [[27, 5, 1, "", "flatten_nested_dictionary"], [27, 5, 1, "", "serialise"], [27, 5, 1, "", "transpose_list_of_dicts"]], "graphnet.data.extractors.utilities.frames": [[28, 5, 1, "", "frame_is_montecarlo"], [28, 5, 1, "", "frame_is_noise"], [28, 5, 1, "", "get_om_keys_and_pulseseries"]], "graphnet.data.extractors.utilities.types": [[29, 5, 1, "", "break_cyclic_recursion"], [29, 5, 1, "", "cast_object_to_pure_python"], [29, 5, 1, "", "cast_pulse_series_to_pure_python"], [29, 5, 1, "", "get_member_variables"], [29, 5, 1, "", 
"is_boost_class"], [29, 5, 1, "", "is_boost_enum"], [29, 5, 1, "", "is_icecube_class"], [29, 5, 1, "", "is_method"], [29, 5, 1, "", "is_type"]], "graphnet.data.parquet": [[31, 0, 0, "-", "parquet_dataconverter"]], "graphnet.data.parquet.parquet_dataconverter": [[31, 1, 1, "", "ParquetDataConverter"]], "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter": [[31, 2, 1, "", "file_suffix"], [31, 3, 1, "", "merge_files"], [31, 3, 1, "", "save_data"]], "graphnet.data.sqlite": [[34, 0, 0, "-", "sqlite_dataconverter"], [35, 0, 0, "-", "sqlite_utilities"]], "graphnet.data.sqlite.sqlite_dataconverter": [[34, 1, 1, "", "SQLiteDataConverter"], [34, 5, 1, "", "construct_dataframe"], [34, 5, 1, "", "is_mc_tree"], [34, 5, 1, "", "is_pulse_map"]], "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter": [[34, 3, 1, "", "any_pulsemap_is_non_empty"], [34, 2, 1, "", "file_suffix"], [34, 3, 1, "", "merge_files"], [34, 3, 1, "", "save_data"]], "graphnet.data.sqlite.sqlite_utilities": [[35, 5, 1, "", "attach_index"], [35, 5, 1, "", "create_table"], [35, 5, 1, "", "create_table_and_save_to_sql"], [35, 5, 1, "", "database_exists"], [35, 5, 1, "", "database_table_exists"], [35, 5, 1, "", "run_sql_code"], [35, 5, 1, "", "save_to_sql"]], "graphnet.data.utilities": [[37, 0, 0, "-", "parquet_to_sqlite"], [38, 0, 0, "-", "random"], [39, 0, 0, "-", "string_selection_resolver"]], "graphnet.data.utilities.parquet_to_sqlite": [[37, 1, 1, "", "ParquetToSQLiteConverter"]], "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter": [[37, 3, 1, "", "run"]], "graphnet.data.utilities.random": [[38, 5, 1, "", "pairwise_shuffle"]], "graphnet.data.utilities.string_selection_resolver": [[39, 1, 1, "", "StringSelectionResolver"]], "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver": [[39, 3, 1, "", "resolve"]], "graphnet.pisa": [[74, 0, 0, "-", "fitting"], [75, 0, 0, "-", "plotting"]], "graphnet.pisa.fitting": [[74, 1, 1, "", "ContourFitter"], [74, 1, 1, "", "WeightFitter"], [74, 5, 1, "", "config_updater"]], "graphnet.pisa.fitting.ContourFitter": [[74, 3, 1, "", "fit_1d_contour"], [74, 3, 1, "", "fit_2d_contour"]], "graphnet.pisa.fitting.WeightFitter": [[74, 3, 1, "", "fit_weights"]], "graphnet.pisa.plotting": [[75, 5, 1, "", "plot_1D_contour"], [75, 5, 1, "", "plot_2D_contour"], [75, 5, 1, "", "read_entry"]], "graphnet.training": [[77, 0, 0, "-", "callbacks"], [81, 0, 0, "-", "weight_fitting"]], "graphnet.training.callbacks": [[77, 1, 1, "", "PiecewiseLinearLR"], [77, 1, 1, "", "ProgressBar"]], "graphnet.training.callbacks.PiecewiseLinearLR": [[77, 3, 1, "", "get_lr"]], "graphnet.training.callbacks.ProgressBar": [[77, 3, 1, "", "get_metrics"], [77, 3, 1, "", "init_predict_tqdm"], [77, 3, 1, "", "init_test_tqdm"], [77, 3, 1, "", "init_train_tqdm"], [77, 3, 1, "", "init_validation_tqdm"], [77, 3, 1, "", "on_train_epoch_end"], [77, 3, 1, "", "on_train_epoch_start"]], "graphnet.training.weight_fitting": [[81, 1, 1, "", "BjoernLow"], [81, 1, 1, "", "Uniform"], [81, 1, 1, "", "WeightFitter"]], "graphnet.training.weight_fitting.WeightFitter": [[81, 3, 1, "", "fit"]], "graphnet.utilities": [[83, 0, 0, "-", "argparse"], [84, 0, 0, "-", "config"], [91, 0, 0, "-", "decorators"], [92, 0, 0, "-", "filesys"], [93, 0, 0, "-", "imports"], [94, 0, 0, "-", "logging"], [95, 0, 0, "-", "maths"]], "graphnet.utilities.argparse": [[83, 1, 1, "", "ArgumentParser"], [83, 1, 1, "", "Options"]], "graphnet.utilities.argparse.ArgumentParser": [[83, 2, 1, "", "standard_arguments"], [83, 3, 1, "", 
"with_standard_arguments"]], "graphnet.utilities.argparse.Options": [[83, 3, 1, "", "contains"], [83, 3, 1, "", "pop_default"]], "graphnet.utilities.config": [[85, 0, 0, "-", "base_config"], [86, 0, 0, "-", "configurable"], [87, 0, 0, "-", "dataset_config"], [88, 0, 0, "-", "model_config"], [89, 0, 0, "-", "parsing"], [90, 0, 0, "-", "training_config"]], "graphnet.utilities.config.base_config": [[85, 1, 1, "", "BaseConfig"], [85, 5, 1, "", "get_all_argument_values"]], "graphnet.utilities.config.base_config.BaseConfig": [[85, 3, 1, "", "as_dict"], [85, 3, 1, "", "dump"], [85, 3, 1, "", "load"], [85, 2, 1, "", "model_config"], [85, 2, 1, "", "model_fields"]], "graphnet.utilities.config.configurable": [[86, 1, 1, "", "Configurable"]], "graphnet.utilities.config.configurable.Configurable": [[86, 4, 1, "", "config"], [86, 3, 1, "", "from_config"], [86, 3, 1, "", "save_config"]], "graphnet.utilities.config.dataset_config": [[87, 1, 1, "", "DatasetConfig"], [87, 1, 1, "", "DatasetConfigSaverABCMeta"], [87, 1, 1, "", "DatasetConfigSaverMeta"], [87, 5, 1, "", "save_dataset_config"]], "graphnet.utilities.config.dataset_config.DatasetConfig": [[87, 3, 1, "", "as_dict"], [87, 2, 1, "", "features"], [87, 2, 1, "", "graph_definition"], [87, 2, 1, "", "index_column"], [87, 2, 1, "", "loss_weight_column"], [87, 2, 1, "", "loss_weight_default_value"], [87, 2, 1, "", "loss_weight_table"], [87, 2, 1, "", "model_config"], [87, 2, 1, "", "model_fields"], [87, 2, 1, "", "node_truth"], [87, 2, 1, "", "node_truth_table"], [87, 2, 1, "", "path"], [87, 2, 1, "", "pulsemaps"], [87, 2, 1, "", "seed"], [87, 2, 1, "", "selection"], [87, 2, 1, "", "string_selection"], [87, 2, 1, "", "truth"], [87, 2, 1, "", "truth_table"]], "graphnet.utilities.config.model_config": [[88, 1, 1, "", "ModelConfig"], [88, 1, 1, "", "ModelConfigSaverABC"], [88, 1, 1, "", "ModelConfigSaverMeta"], [88, 5, 1, "", "save_model_config"]], "graphnet.utilities.config.model_config.ModelConfig": [[88, 2, 1, "", "arguments"], [88, 3, 1, "", "as_dict"], [88, 2, 1, "", "class_name"], [88, 2, 1, "", "model_config"], [88, 2, 1, "", "model_fields"]], "graphnet.utilities.config.parsing": [[89, 5, 1, "", "get_all_grapnet_classes"], [89, 5, 1, "", "get_graphnet_classes"], [89, 5, 1, "", "is_graphnet_class"], [89, 5, 1, "", "is_graphnet_module"], [89, 5, 1, "", "list_all_submodules"], [89, 5, 1, "", "traverse_and_apply"]], "graphnet.utilities.config.training_config": [[90, 1, 1, "", "TrainingConfig"]], "graphnet.utilities.config.training_config.TrainingConfig": [[90, 2, 1, "", "dataloader"], [90, 2, 1, "", "early_stopping_patience"], [90, 2, 1, "", "fit"], [90, 2, 1, "", "model_config"], [90, 2, 1, "", "model_fields"], [90, 2, 1, "", "target"]], "graphnet.utilities.filesys": [[92, 5, 1, "", "find_i3_files"], [92, 5, 1, "", "has_extension"], [92, 5, 1, "", "is_gcd_file"], [92, 5, 1, "", "is_i3_file"]], "graphnet.utilities.imports": [[93, 5, 1, "", "has_icecube_package"], [93, 5, 1, "", "has_pisa_package"], [93, 5, 1, "", "has_torch_package"], [93, 5, 1, "", "requires_icecube"]], "graphnet.utilities.logging": [[94, 1, 1, "", "Logger"], [94, 1, 1, "", "RepeatFilter"]], "graphnet.utilities.logging.Logger": [[94, 3, 1, "", "critical"], [94, 3, 1, "", "debug"], [94, 3, 1, "", "error"], [94, 4, 1, "", "file_handlers"], [94, 4, 1, "", "handlers"], [94, 3, 1, "", "info"], [94, 3, 1, "", "setLevel"], [94, 4, 1, "", "stream_handlers"], [94, 3, 1, "", "warning"], [94, 3, 1, "", "warning_once"]], "graphnet.utilities.logging.RepeatFilter": [[94, 3, 1, "", "filter"], [94, 2, 
1, "", "nb_repeats_allowed"]], "graphnet.utilities.maths": [[95, 5, 1, "", "eps_like"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:attribute", "3": "py:method", "4": "py:property", "5": "py:function"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "attribute", "Python attribute"], "3": ["py", "method", "Python method"], "4": ["py", "property", "Python property"], "5": ["py", "function", "Python function"]}, "titleterms": {"about": [0, 98], "impact": [0, 98], "usag": [0, 98], "acknowledg": [0, 98], "api": 1, "constant": [2, 4], "data": 3, "dataconvert": 5, "dataload": 6, "dataset": [7, 8], "parquet": [9, 30], "parquet_dataset": 10, "sqlite": [11, 33], "sqlite_dataset": 12, "extractor": 13, "i3extractor": 14, "i3featureextractor": 15, "i3genericextractor": 16, "i3hybridrecoextractor": 17, "i3ntmuonlabelsextractor": 18, "i3particleextractor": 19, "i3pisaextractor": 20, "i3quesoextractor": 21, "i3retroextractor": 22, "i3splinempeextractor": 23, "i3truthextractor": 24, "i3tumextractor": 25, "util": [26, 36, 72, 80, 82], "collect": 27, "frame": 28, "type": 29, "parquet_dataconvert": 31, "pipelin": 32, "sqlite_dataconvert": 34, "sqlite_util": 35, "parquet_to_sqlit": 37, "random": 38, "string_selection_resolv": 39, "deploy": [40, 42], "i3modul": 41, "graphnet_modul": 43, "model": [44, 66], "coarsen": 45, "compon": 46, "layer": 47, "pool": 48, "detector": [49, 50], "icecub": 51, "prometheu": 52, "gnn": [53, 58], "convnet": 54, "dynedg": 55, "dynedge_jinst": 56, "dynedge_kaggle_tito": 57, "graph": [59, 63], "edg": [60, 61], "graph_definit": 62, "node": [64, 65], "standard_model": 67, "task": [68, 71], "classif": 69, "reconstruct": 70, "pisa": 73, "fit": 74, "plot": 75, "train": 76, "callback": 77, "label": 78, "loss_funct": 79, "weight_fit": 81, "argpars": 83, "config": 84, "base_config": 85, "configur": 86, "dataset_config": 87, "model_config": 88, "pars": 89, "training_config": 90, "decor": 91, "filesi": 92, "import": 93, "log": 94, "math": 95, "src": 96, "contribut": 97, "github": 97, "issu": 97, "pull": 97, "request": 97, "convent": 97, "code": 97, "qualiti": 97, "instal": 99, "icetrai": 99, "stand": 99, "alon": 99, "run": 99, "docker": 99}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 60}, "alltitles": {"About": [[0, "about"], [98, "about"]], "Impact": [[0, "impact"], [98, "impact"]], "Usage": [[0, "usage"], [98, "usage"]], "Acknowledgements": [[0, "acknowledgements"], [98, "acknowledgements"]], "API": [[1, "module-graphnet"]], "constants": [[2, "module-graphnet.constants"], [4, "module-graphnet.data.constants"]], "data": [[3, "module-graphnet.data"]], "dataconverter": [[5, "module-graphnet.data.dataconverter"]], "dataloader": [[6, "dataloader"]], "dataset": [[7, "dataset"], [8, "dataset"]], "parquet": [[9, "parquet"], [30, "module-graphnet.data.parquet"]], "parquet_dataset": [[10, "parquet-dataset"]], "sqlite": [[11, "sqlite"], [33, "module-graphnet.data.sqlite"]], "sqlite_dataset": [[12, "sqlite-dataset"]], "extractors": [[13, "module-graphnet.data.extractors"]], "i3extractor": [[14, "module-graphnet.data.extractors.i3extractor"]], "i3featureextractor": [[15, 
"module-graphnet.data.extractors.i3featureextractor"]], "i3genericextractor": [[16, "module-graphnet.data.extractors.i3genericextractor"]], "i3hybridrecoextractor": [[17, "module-graphnet.data.extractors.i3hybridrecoextractor"]], "i3ntmuonlabelsextractor": [[18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"]], "i3particleextractor": [[19, "module-graphnet.data.extractors.i3particleextractor"]], "i3pisaextractor": [[20, "module-graphnet.data.extractors.i3pisaextractor"]], "i3quesoextractor": [[21, "module-graphnet.data.extractors.i3quesoextractor"]], "i3retroextractor": [[22, "module-graphnet.data.extractors.i3retroextractor"]], "i3splinempeextractor": [[23, "module-graphnet.data.extractors.i3splinempeextractor"]], "i3truthextractor": [[24, "module-graphnet.data.extractors.i3truthextractor"]], "i3tumextractor": [[25, "module-graphnet.data.extractors.i3tumextractor"]], "utilities": [[26, "module-graphnet.data.extractors.utilities"], [36, "module-graphnet.data.utilities"], [82, "module-graphnet.utilities"]], "collections": [[27, "module-graphnet.data.extractors.utilities.collections"]], "frames": [[28, "module-graphnet.data.extractors.utilities.frames"]], "types": [[29, "module-graphnet.data.extractors.utilities.types"]], "parquet_dataconverter": [[31, "module-graphnet.data.parquet.parquet_dataconverter"]], "pipeline": [[32, "pipeline"]], "sqlite_dataconverter": [[34, "module-graphnet.data.sqlite.sqlite_dataconverter"]], "sqlite_utilities": [[35, "module-graphnet.data.sqlite.sqlite_utilities"]], "parquet_to_sqlite": [[37, "module-graphnet.data.utilities.parquet_to_sqlite"]], "random": [[38, "module-graphnet.data.utilities.random"]], "string_selection_resolver": [[39, "module-graphnet.data.utilities.string_selection_resolver"]], "deployment": [[40, "module-graphnet.deployment"]], "i3modules": [[41, "i3modules"]], "deployer": [[42, "deployer"]], "graphnet_module": [[43, "graphnet-module"]], "models": [[44, "models"]], "coarsening": [[45, "coarsening"]], "components": [[46, "components"]], "layers": [[47, "layers"]], "pool": [[48, "pool"]], "detector": [[49, "detector"], [50, "detector"]], "icecube": [[51, "icecube"]], "prometheus": [[52, "prometheus"]], "gnn": [[53, "gnn"], [58, "gnn"]], "convnet": [[54, "convnet"]], "dynedge": [[55, "dynedge"]], "dynedge_jinst": [[56, "dynedge-jinst"]], "dynedge_kaggle_tito": [[57, "dynedge-kaggle-tito"]], "graphs": [[59, "graphs"], [63, "graphs"]], "edges": [[60, "edges"], [61, "edges"]], "graph_definition": [[62, "graph-definition"]], "nodes": [[64, "nodes"], [65, "nodes"]], "model": [[66, "model"]], "standard_model": [[67, "standard-model"]], "task": [[68, "task"], [71, "task"]], "classification": [[69, "classification"]], "reconstruction": [[70, "reconstruction"]], "utils": [[72, "utils"], [80, "utils"]], "pisa": [[73, "module-graphnet.pisa"]], "fitting": [[74, "module-graphnet.pisa.fitting"]], "plotting": [[75, "module-graphnet.pisa.plotting"]], "training": [[76, "module-graphnet.training"]], "callbacks": [[77, "module-graphnet.training.callbacks"]], "labels": [[78, "labels"]], "loss_functions": [[79, "loss-functions"]], "weight_fitting": [[81, "module-graphnet.training.weight_fitting"]], "argparse": [[83, "module-graphnet.utilities.argparse"]], "config": [[84, "module-graphnet.utilities.config"]], "base_config": [[85, "module-graphnet.utilities.config.base_config"]], "configurable": [[86, "module-graphnet.utilities.config.configurable"]], "dataset_config": [[87, "module-graphnet.utilities.config.dataset_config"]], "model_config": [[88, 
"module-graphnet.utilities.config.model_config"]], "parsing": [[89, "module-graphnet.utilities.config.parsing"]], "training_config": [[90, "module-graphnet.utilities.config.training_config"]], "decorators": [[91, "module-graphnet.utilities.decorators"]], "filesys": [[92, "module-graphnet.utilities.filesys"]], "imports": [[93, "module-graphnet.utilities.imports"]], "logging": [[94, "module-graphnet.utilities.logging"]], "maths": [[95, "module-graphnet.utilities.maths"]], "src": [[96, "src"]], "Contribute": [[97, "contribute"]], "GitHub issues": [[97, "github-issues"]], "Pull requests": [[97, "pull-requests"]], "Conventions": [[97, "conventions"]], "Code quality": [[97, "code-quality"]], "Install": [[99, "install"]], "Installing with IceTray": [[99, "installing-with-icetray"]], "Installing stand-alone": [[99, "installing-stand-alone"]], "Running in Docker": [[99, "running-in-docker"]]}, "indexentries": {"graphnet": [[1, "module-graphnet"]], "module": [[1, "module-graphnet"], [2, "module-graphnet.constants"], [3, "module-graphnet.data"], [4, "module-graphnet.data.constants"], [5, "module-graphnet.data.dataconverter"], [13, "module-graphnet.data.extractors"], [14, "module-graphnet.data.extractors.i3extractor"], [15, "module-graphnet.data.extractors.i3featureextractor"], [16, "module-graphnet.data.extractors.i3genericextractor"], [17, "module-graphnet.data.extractors.i3hybridrecoextractor"], [18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"], [19, "module-graphnet.data.extractors.i3particleextractor"], [20, "module-graphnet.data.extractors.i3pisaextractor"], [21, "module-graphnet.data.extractors.i3quesoextractor"], [22, "module-graphnet.data.extractors.i3retroextractor"], [23, "module-graphnet.data.extractors.i3splinempeextractor"], [24, "module-graphnet.data.extractors.i3truthextractor"], [25, "module-graphnet.data.extractors.i3tumextractor"], [26, "module-graphnet.data.extractors.utilities"], [27, "module-graphnet.data.extractors.utilities.collections"], [28, "module-graphnet.data.extractors.utilities.frames"], [29, "module-graphnet.data.extractors.utilities.types"], [30, "module-graphnet.data.parquet"], [31, "module-graphnet.data.parquet.parquet_dataconverter"], [33, "module-graphnet.data.sqlite"], [34, "module-graphnet.data.sqlite.sqlite_dataconverter"], [35, "module-graphnet.data.sqlite.sqlite_utilities"], [36, "module-graphnet.data.utilities"], [37, "module-graphnet.data.utilities.parquet_to_sqlite"], [38, "module-graphnet.data.utilities.random"], [39, "module-graphnet.data.utilities.string_selection_resolver"], [40, "module-graphnet.deployment"], [73, "module-graphnet.pisa"], [74, "module-graphnet.pisa.fitting"], [75, "module-graphnet.pisa.plotting"], [76, "module-graphnet.training"], [77, "module-graphnet.training.callbacks"], [81, "module-graphnet.training.weight_fitting"], [82, "module-graphnet.utilities"], [83, "module-graphnet.utilities.argparse"], [84, "module-graphnet.utilities.config"], [85, "module-graphnet.utilities.config.base_config"], [86, "module-graphnet.utilities.config.configurable"], [87, "module-graphnet.utilities.config.dataset_config"], [88, "module-graphnet.utilities.config.model_config"], [89, "module-graphnet.utilities.config.parsing"], [90, "module-graphnet.utilities.config.training_config"], [91, "module-graphnet.utilities.decorators"], [92, "module-graphnet.utilities.filesys"], [93, "module-graphnet.utilities.imports"], [94, "module-graphnet.utilities.logging"], [95, "module-graphnet.utilities.maths"]], "graphnet.constants": [[2, 
"module-graphnet.constants"]], "graphnet.data": [[3, "module-graphnet.data"]], "deepcore (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.DEEPCORE"]], "deepcore (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.DEEPCORE"]], "features (class in graphnet.data.constants)": [[4, "graphnet.data.constants.FEATURES"]], "icecube86 (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.ICECUBE86"]], "icecube86 (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.ICECUBE86"]], "kaggle (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.KAGGLE"]], "kaggle (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.KAGGLE"]], "prometheus (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.PROMETHEUS"]], "prometheus (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.PROMETHEUS"]], "truth (class in graphnet.data.constants)": [[4, "graphnet.data.constants.TRUTH"]], "upgrade (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.UPGRADE"]], "upgrade (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.UPGRADE"]], "graphnet.data.constants": [[4, "module-graphnet.data.constants"]], "dataconverter (class in graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.DataConverter"]], "fileset (class in graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.FileSet"]], "cache_output_files() (in module graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.cache_output_files"]], "execute() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.execute"]], "file_suffix (graphnet.data.dataconverter.dataconverter property)": [[5, "graphnet.data.dataconverter.DataConverter.file_suffix"]], "gcd_file (graphnet.data.dataconverter.fileset attribute)": [[5, "graphnet.data.dataconverter.FileSet.gcd_file"]], "get_map_function() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.get_map_function"]], "graphnet.data.dataconverter": [[5, "module-graphnet.data.dataconverter"]], "i3_file (graphnet.data.dataconverter.fileset attribute)": [[5, "graphnet.data.dataconverter.FileSet.i3_file"]], "init_global_index() (in module graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.init_global_index"]], "merge_files() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.merge_files"]], "save_data() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.save_data"]], "graphnet.data.extractors": [[13, "module-graphnet.data.extractors"]], "i3extractor (class in graphnet.data.extractors.i3extractor)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor"]], "i3extractorcollection (class in graphnet.data.extractors.i3extractor)": [[14, "graphnet.data.extractors.i3extractor.I3ExtractorCollection"]], "graphnet.data.extractors.i3extractor": [[14, "module-graphnet.data.extractors.i3extractor"]], "name (graphnet.data.extractors.i3extractor.i3extractor property)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor.name"]], "set_files() (graphnet.data.extractors.i3extractor.i3extractor method)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor.set_files"]], "set_files() 
(graphnet.data.extractors.i3extractor.i3extractorcollection method)": [[14, "graphnet.data.extractors.i3extractor.I3ExtractorCollection.set_files"]], "i3featureextractor (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractor"]], "i3featureextractoricecube86 (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCube86"]], "i3featureextractoricecubedeepcore (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCubeDeepCore"]], "i3featureextractoricecubeupgrade (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCubeUpgrade"]], "i3pulsenoisetruthflagicecubeupgrade (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3PulseNoiseTruthFlagIceCubeUpgrade"]], "graphnet.data.extractors.i3featureextractor": [[15, "module-graphnet.data.extractors.i3featureextractor"]], "i3genericextractor (class in graphnet.data.extractors.i3genericextractor)": [[16, "graphnet.data.extractors.i3genericextractor.I3GenericExtractor"]], "graphnet.data.extractors.i3genericextractor": [[16, "module-graphnet.data.extractors.i3genericextractor"]], "i3galacticplanehybridrecoextractor (class in graphnet.data.extractors.i3hybridrecoextractor)": [[17, "graphnet.data.extractors.i3hybridrecoextractor.I3GalacticPlaneHybridRecoExtractor"]], "graphnet.data.extractors.i3hybridrecoextractor": [[17, "module-graphnet.data.extractors.i3hybridrecoextractor"]], "i3ntmuonlabelextractor (class in graphnet.data.extractors.i3ntmuonlabelsextractor)": [[18, "graphnet.data.extractors.i3ntmuonlabelsextractor.I3NTMuonLabelExtractor"]], "graphnet.data.extractors.i3ntmuonlabelsextractor": [[18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"]], "i3particleextractor (class in graphnet.data.extractors.i3particleextractor)": [[19, "graphnet.data.extractors.i3particleextractor.I3ParticleExtractor"]], "graphnet.data.extractors.i3particleextractor": [[19, "module-graphnet.data.extractors.i3particleextractor"]], "i3pisaextractor (class in graphnet.data.extractors.i3pisaextractor)": [[20, "graphnet.data.extractors.i3pisaextractor.I3PISAExtractor"]], "graphnet.data.extractors.i3pisaextractor": [[20, "module-graphnet.data.extractors.i3pisaextractor"]], "i3quesoextractor (class in graphnet.data.extractors.i3quesoextractor)": [[21, "graphnet.data.extractors.i3quesoextractor.I3QUESOExtractor"]], "graphnet.data.extractors.i3quesoextractor": [[21, "module-graphnet.data.extractors.i3quesoextractor"]], "i3retroextractor (class in graphnet.data.extractors.i3retroextractor)": [[22, "graphnet.data.extractors.i3retroextractor.I3RetroExtractor"]], "graphnet.data.extractors.i3retroextractor": [[22, "module-graphnet.data.extractors.i3retroextractor"]], "i3splinempeicextractor (class in graphnet.data.extractors.i3splinempeextractor)": [[23, "graphnet.data.extractors.i3splinempeextractor.I3SplineMPEICExtractor"]], "graphnet.data.extractors.i3splinempeextractor": [[23, "module-graphnet.data.extractors.i3splinempeextractor"]], "i3truthextractor (class in graphnet.data.extractors.i3truthextractor)": [[24, "graphnet.data.extractors.i3truthextractor.I3TruthExtractor"]], "graphnet.data.extractors.i3truthextractor": [[24, "module-graphnet.data.extractors.i3truthextractor"]], "i3tumextractor (class in 
graphnet.data.extractors.i3tumextractor)": [[25, "graphnet.data.extractors.i3tumextractor.I3TUMExtractor"]], "graphnet.data.extractors.i3tumextractor": [[25, "module-graphnet.data.extractors.i3tumextractor"]], "graphnet.data.extractors.utilities": [[26, "module-graphnet.data.extractors.utilities"]], "flatten_nested_dictionary() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.flatten_nested_dictionary"]], "graphnet.data.extractors.utilities.collections": [[27, "module-graphnet.data.extractors.utilities.collections"]], "serialise() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.serialise"]], "transpose_list_of_dicts() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.transpose_list_of_dicts"]], "frame_is_montecarlo() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.frame_is_montecarlo"]], "frame_is_noise() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.frame_is_noise"]], "get_om_keys_and_pulseseries() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.get_om_keys_and_pulseseries"]], "graphnet.data.extractors.utilities.frames": [[28, "module-graphnet.data.extractors.utilities.frames"]], "break_cyclic_recursion() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.break_cyclic_recursion"]], "cast_object_to_pure_python() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.cast_object_to_pure_python"]], "cast_pulse_series_to_pure_python() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.cast_pulse_series_to_pure_python"]], "get_member_variables() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.get_member_variables"]], "graphnet.data.extractors.utilities.types": [[29, "module-graphnet.data.extractors.utilities.types"]], "is_boost_class() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_boost_class"]], "is_boost_enum() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_boost_enum"]], "is_icecube_class() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_icecube_class"]], "is_method() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_method"]], "is_type() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_type"]], "graphnet.data.parquet": [[30, "module-graphnet.data.parquet"]], "parquetdataconverter (class in graphnet.data.parquet.parquet_dataconverter)": [[31, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter"]], "file_suffix (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter attribute)": [[31, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.file_suffix"]], "graphnet.data.parquet.parquet_dataconverter": [[31, "module-graphnet.data.parquet.parquet_dataconverter"]], "merge_files() (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter method)": [[31, 
"graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.merge_files"]], "save_data() (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter method)": [[31, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.save_data"]], "graphnet.data.sqlite": [[33, "module-graphnet.data.sqlite"]], "sqlitedataconverter (class in graphnet.data.sqlite.sqlite_dataconverter)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter"]], "any_pulsemap_is_non_empty() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.any_pulsemap_is_non_empty"]], "construct_dataframe() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.construct_dataframe"]], "file_suffix (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter attribute)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.file_suffix"]], "graphnet.data.sqlite.sqlite_dataconverter": [[34, "module-graphnet.data.sqlite.sqlite_dataconverter"]], "is_mc_tree() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.is_mc_tree"]], "is_pulse_map() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.is_pulse_map"]], "merge_files() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.merge_files"]], "save_data() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[34, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.save_data"]], "attach_index() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.attach_index"]], "create_table() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.create_table"]], "create_table_and_save_to_sql() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.create_table_and_save_to_sql"]], "database_exists() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.database_exists"]], "database_table_exists() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.database_table_exists"]], "graphnet.data.sqlite.sqlite_utilities": [[35, "module-graphnet.data.sqlite.sqlite_utilities"]], "run_sql_code() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.run_sql_code"]], "save_to_sql() (in module graphnet.data.sqlite.sqlite_utilities)": [[35, "graphnet.data.sqlite.sqlite_utilities.save_to_sql"]], "graphnet.data.utilities": [[36, "module-graphnet.data.utilities"]], "parquettosqliteconverter (class in graphnet.data.utilities.parquet_to_sqlite)": [[37, "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter"]], "graphnet.data.utilities.parquet_to_sqlite": [[37, "module-graphnet.data.utilities.parquet_to_sqlite"]], "run() (graphnet.data.utilities.parquet_to_sqlite.parquettosqliteconverter method)": [[37, "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter.run"]], "graphnet.data.utilities.random": [[38, "module-graphnet.data.utilities.random"]], "pairwise_shuffle() (in module graphnet.data.utilities.random)": [[38, "graphnet.data.utilities.random.pairwise_shuffle"]], "stringselectionresolver (class in 
graphnet.data.utilities.string_selection_resolver)": [[39, "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver"]], "graphnet.data.utilities.string_selection_resolver": [[39, "module-graphnet.data.utilities.string_selection_resolver"]], "resolve() (graphnet.data.utilities.string_selection_resolver.stringselectionresolver method)": [[39, "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver.resolve"]], "graphnet.deployment": [[40, "module-graphnet.deployment"]], "graphnet.pisa": [[73, "module-graphnet.pisa"]], "contourfitter (class in graphnet.pisa.fitting)": [[74, "graphnet.pisa.fitting.ContourFitter"]], "weightfitter (class in graphnet.pisa.fitting)": [[74, "graphnet.pisa.fitting.WeightFitter"]], "config_updater() (in module graphnet.pisa.fitting)": [[74, "graphnet.pisa.fitting.config_updater"]], "fit_1d_contour() (graphnet.pisa.fitting.contourfitter method)": [[74, "graphnet.pisa.fitting.ContourFitter.fit_1d_contour"]], "fit_2d_contour() (graphnet.pisa.fitting.contourfitter method)": [[74, "graphnet.pisa.fitting.ContourFitter.fit_2d_contour"]], "fit_weights() (graphnet.pisa.fitting.weightfitter method)": [[74, "graphnet.pisa.fitting.WeightFitter.fit_weights"]], "graphnet.pisa.fitting": [[74, "module-graphnet.pisa.fitting"]], "graphnet.pisa.plotting": [[75, "module-graphnet.pisa.plotting"]], "plot_1d_contour() (in module graphnet.pisa.plotting)": [[75, "graphnet.pisa.plotting.plot_1D_contour"]], "plot_2d_contour() (in module graphnet.pisa.plotting)": [[75, "graphnet.pisa.plotting.plot_2D_contour"]], "read_entry() (in module graphnet.pisa.plotting)": [[75, "graphnet.pisa.plotting.read_entry"]], "graphnet.training": [[76, "module-graphnet.training"]], "piecewiselinearlr (class in graphnet.training.callbacks)": [[77, "graphnet.training.callbacks.PiecewiseLinearLR"]], "progressbar (class in graphnet.training.callbacks)": [[77, "graphnet.training.callbacks.ProgressBar"]], "get_lr() (graphnet.training.callbacks.piecewiselinearlr method)": [[77, "graphnet.training.callbacks.PiecewiseLinearLR.get_lr"]], "get_metrics() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.get_metrics"]], "graphnet.training.callbacks": [[77, "module-graphnet.training.callbacks"]], "init_predict_tqdm() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.init_predict_tqdm"]], "init_test_tqdm() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.init_test_tqdm"]], "init_train_tqdm() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.init_train_tqdm"]], "init_validation_tqdm() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.init_validation_tqdm"]], "on_train_epoch_end() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.on_train_epoch_end"]], "on_train_epoch_start() (graphnet.training.callbacks.progressbar method)": [[77, "graphnet.training.callbacks.ProgressBar.on_train_epoch_start"]], "bjoernlow (class in graphnet.training.weight_fitting)": [[81, "graphnet.training.weight_fitting.BjoernLow"]], "uniform (class in graphnet.training.weight_fitting)": [[81, "graphnet.training.weight_fitting.Uniform"]], "weightfitter (class in graphnet.training.weight_fitting)": [[81, "graphnet.training.weight_fitting.WeightFitter"]], "fit() (graphnet.training.weight_fitting.weightfitter method)": [[81, 
"graphnet.training.weight_fitting.WeightFitter.fit"]], "graphnet.training.weight_fitting": [[81, "module-graphnet.training.weight_fitting"]], "graphnet.utilities": [[82, "module-graphnet.utilities"]], "argumentparser (class in graphnet.utilities.argparse)": [[83, "graphnet.utilities.argparse.ArgumentParser"]], "options (class in graphnet.utilities.argparse)": [[83, "graphnet.utilities.argparse.Options"]], "contains() (graphnet.utilities.argparse.options method)": [[83, "graphnet.utilities.argparse.Options.contains"]], "graphnet.utilities.argparse": [[83, "module-graphnet.utilities.argparse"]], "pop_default() (graphnet.utilities.argparse.options method)": [[83, "graphnet.utilities.argparse.Options.pop_default"]], "standard_arguments (graphnet.utilities.argparse.argumentparser attribute)": [[83, "graphnet.utilities.argparse.ArgumentParser.standard_arguments"]], "with_standard_arguments() (graphnet.utilities.argparse.argumentparser method)": [[83, "graphnet.utilities.argparse.ArgumentParser.with_standard_arguments"]], "graphnet.utilities.config": [[84, "module-graphnet.utilities.config"]], "baseconfig (class in graphnet.utilities.config.base_config)": [[85, "graphnet.utilities.config.base_config.BaseConfig"]], "as_dict() (graphnet.utilities.config.base_config.baseconfig method)": [[85, "graphnet.utilities.config.base_config.BaseConfig.as_dict"]], "dump() (graphnet.utilities.config.base_config.baseconfig method)": [[85, "graphnet.utilities.config.base_config.BaseConfig.dump"]], "get_all_argument_values() (in module graphnet.utilities.config.base_config)": [[85, "graphnet.utilities.config.base_config.get_all_argument_values"]], "graphnet.utilities.config.base_config": [[85, "module-graphnet.utilities.config.base_config"]], "load() (graphnet.utilities.config.base_config.baseconfig class method)": [[85, "graphnet.utilities.config.base_config.BaseConfig.load"]], "model_config (graphnet.utilities.config.base_config.baseconfig attribute)": [[85, "graphnet.utilities.config.base_config.BaseConfig.model_config"]], "model_fields (graphnet.utilities.config.base_config.baseconfig attribute)": [[85, "graphnet.utilities.config.base_config.BaseConfig.model_fields"]], "configurable (class in graphnet.utilities.config.configurable)": [[86, "graphnet.utilities.config.configurable.Configurable"]], "config (graphnet.utilities.config.configurable.configurable property)": [[86, "graphnet.utilities.config.configurable.Configurable.config"]], "from_config() (graphnet.utilities.config.configurable.configurable class method)": [[86, "graphnet.utilities.config.configurable.Configurable.from_config"]], "graphnet.utilities.config.configurable": [[86, "module-graphnet.utilities.config.configurable"]], "save_config() (graphnet.utilities.config.configurable.configurable method)": [[86, "graphnet.utilities.config.configurable.Configurable.save_config"]], "datasetconfig (class in graphnet.utilities.config.dataset_config)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig"]], "datasetconfigsaverabcmeta (class in graphnet.utilities.config.dataset_config)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfigSaverABCMeta"]], "datasetconfigsavermeta (class in graphnet.utilities.config.dataset_config)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfigSaverMeta"]], "as_dict() (graphnet.utilities.config.dataset_config.datasetconfig method)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.as_dict"]], "features (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, 
"graphnet.utilities.config.dataset_config.DatasetConfig.features"]], "graph_definition (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.graph_definition"]], "graphnet.utilities.config.dataset_config": [[87, "module-graphnet.utilities.config.dataset_config"]], "index_column (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.index_column"]], "loss_weight_column (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_column"]], "loss_weight_default_value (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_default_value"]], "loss_weight_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_table"]], "model_config (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.model_config"]], "model_fields (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.model_fields"]], "node_truth (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.node_truth"]], "node_truth_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.node_truth_table"]], "path (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.path"]], "pulsemaps (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.pulsemaps"]], "save_dataset_config() (in module graphnet.utilities.config.dataset_config)": [[87, "graphnet.utilities.config.dataset_config.save_dataset_config"]], "seed (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.seed"]], "selection (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.selection"]], "string_selection (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.string_selection"]], "truth (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.truth"]], "truth_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[87, "graphnet.utilities.config.dataset_config.DatasetConfig.truth_table"]], "modelconfig (class in graphnet.utilities.config.model_config)": [[88, "graphnet.utilities.config.model_config.ModelConfig"]], "modelconfigsaverabc (class in graphnet.utilities.config.model_config)": [[88, "graphnet.utilities.config.model_config.ModelConfigSaverABC"]], "modelconfigsavermeta (class in graphnet.utilities.config.model_config)": [[88, "graphnet.utilities.config.model_config.ModelConfigSaverMeta"]], "arguments (graphnet.utilities.config.model_config.modelconfig attribute)": [[88, "graphnet.utilities.config.model_config.ModelConfig.arguments"]], "as_dict() (graphnet.utilities.config.model_config.modelconfig 
method)": [[88, "graphnet.utilities.config.model_config.ModelConfig.as_dict"]], "class_name (graphnet.utilities.config.model_config.modelconfig attribute)": [[88, "graphnet.utilities.config.model_config.ModelConfig.class_name"]], "graphnet.utilities.config.model_config": [[88, "module-graphnet.utilities.config.model_config"]], "model_config (graphnet.utilities.config.model_config.modelconfig attribute)": [[88, "graphnet.utilities.config.model_config.ModelConfig.model_config"]], "model_fields (graphnet.utilities.config.model_config.modelconfig attribute)": [[88, "graphnet.utilities.config.model_config.ModelConfig.model_fields"]], "save_model_config() (in module graphnet.utilities.config.model_config)": [[88, "graphnet.utilities.config.model_config.save_model_config"]], "get_all_grapnet_classes() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.get_all_grapnet_classes"]], "get_graphnet_classes() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.get_graphnet_classes"]], "graphnet.utilities.config.parsing": [[89, "module-graphnet.utilities.config.parsing"]], "is_graphnet_class() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.is_graphnet_class"]], "is_graphnet_module() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.is_graphnet_module"]], "list_all_submodules() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.list_all_submodules"]], "traverse_and_apply() (in module graphnet.utilities.config.parsing)": [[89, "graphnet.utilities.config.parsing.traverse_and_apply"]], "trainingconfig (class in graphnet.utilities.config.training_config)": [[90, "graphnet.utilities.config.training_config.TrainingConfig"]], "dataloader (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.dataloader"]], "early_stopping_patience (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.early_stopping_patience"]], "fit (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.fit"]], "graphnet.utilities.config.training_config": [[90, "module-graphnet.utilities.config.training_config"]], "model_config (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.model_config"]], "model_fields (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.model_fields"]], "target (graphnet.utilities.config.training_config.trainingconfig attribute)": [[90, "graphnet.utilities.config.training_config.TrainingConfig.target"]], "graphnet.utilities.decorators": [[91, "module-graphnet.utilities.decorators"]], "find_i3_files() (in module graphnet.utilities.filesys)": [[92, "graphnet.utilities.filesys.find_i3_files"]], "graphnet.utilities.filesys": [[92, "module-graphnet.utilities.filesys"]], "has_extension() (in module graphnet.utilities.filesys)": [[92, "graphnet.utilities.filesys.has_extension"]], "is_gcd_file() (in module graphnet.utilities.filesys)": [[92, "graphnet.utilities.filesys.is_gcd_file"]], "is_i3_file() (in module graphnet.utilities.filesys)": [[92, "graphnet.utilities.filesys.is_i3_file"]], "graphnet.utilities.imports": [[93, 
"module-graphnet.utilities.imports"]], "has_icecube_package() (in module graphnet.utilities.imports)": [[93, "graphnet.utilities.imports.has_icecube_package"]], "has_pisa_package() (in module graphnet.utilities.imports)": [[93, "graphnet.utilities.imports.has_pisa_package"]], "has_torch_package() (in module graphnet.utilities.imports)": [[93, "graphnet.utilities.imports.has_torch_package"]], "requires_icecube() (in module graphnet.utilities.imports)": [[93, "graphnet.utilities.imports.requires_icecube"]], "logger (class in graphnet.utilities.logging)": [[94, "graphnet.utilities.logging.Logger"]], "repeatfilter (class in graphnet.utilities.logging)": [[94, "graphnet.utilities.logging.RepeatFilter"]], "critical() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.critical"]], "debug() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.debug"]], "error() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.error"]], "file_handlers (graphnet.utilities.logging.logger property)": [[94, "graphnet.utilities.logging.Logger.file_handlers"]], "filter() (graphnet.utilities.logging.repeatfilter method)": [[94, "graphnet.utilities.logging.RepeatFilter.filter"]], "graphnet.utilities.logging": [[94, "module-graphnet.utilities.logging"]], "handlers (graphnet.utilities.logging.logger property)": [[94, "graphnet.utilities.logging.Logger.handlers"]], "info() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.info"]], "nb_repeats_allowed (graphnet.utilities.logging.repeatfilter attribute)": [[94, "graphnet.utilities.logging.RepeatFilter.nb_repeats_allowed"]], "setlevel() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.setLevel"]], "stream_handlers (graphnet.utilities.logging.logger property)": [[94, "graphnet.utilities.logging.Logger.stream_handlers"]], "warning() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.warning"]], "warning_once() (graphnet.utilities.logging.logger method)": [[94, "graphnet.utilities.logging.Logger.warning_once"]], "eps_like() (in module graphnet.utilities.maths)": [[95, "graphnet.utilities.maths.eps_like"]], "graphnet.utilities.maths": [[95, "module-graphnet.utilities.maths"]]}}) \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index b44cf6991..392b74763 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -1 +1 @@ 
-https://graphnet-team.github.io/graphnetabout.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.constants.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.constants.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataloader.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.parquet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.parquet.parquet_dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.sqlite.sqlite_dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3extractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3featureextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3genericextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3hybridrecoextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3ntmuonlabelsextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3particleextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3pisaextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3quesoextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3retroextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3splinempeextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3truthextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3tumextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.collections.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.frames.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.types.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.parquet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.parquet.parquet_dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.pipeline.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.sqlite_dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.sqlite_utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.parquet_to_sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.random.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.string_selection_resolver.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.i3modules.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.i3modules.deployer.htmlhttps://graphnet-team.github.io/g
raphnetapi/graphnet.deployment.i3modules.graphnet_module.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.coarsening.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.layers.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.pool.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.detector.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.icecube.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.prometheus.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.convnet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge_jinst.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge_kaggle_tito.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.gnn.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.edges.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.graph_definition.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.graphs.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.nodes.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.nodes.nodes.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.model.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.standard_model.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.classification.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.reconstruction.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.task.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.fitting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.plotting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.callbacks.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.labels.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.loss_functions.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.weight_fitting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.argparse.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.base_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.configurable.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.dataset_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.model_config.htmlhttps://graphnet-team.github.io
/graphnetapi/graphnet.utilities.config.parsing.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.training_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.decorators.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.filesys.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.imports.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.logging.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.maths.htmlhttps://graphnet-team.github.io/graphnetapi/modules.htmlhttps://graphnet-team.github.io/graphnetcontribute.htmlhttps://graphnet-team.github.io/graphnetindex.htmlhttps://graphnet-team.github.io/graphnetinstall.htmlhttps://graphnet-team.github.io/graphnetgenindex.htmlhttps://graphnet-team.github.io/graphnetpy-modindex.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/constants.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataloader.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataset/dataset.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataset/parquet/parquet_dataset.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataset/sqlite/sqlite_dataset.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3extractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3featureextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3genericextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3hybridrecoextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3ntmuonlabelsextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3particleextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3pisaextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3quesoextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3retroextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3splinempeextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3truthextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3tumextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/collections.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/frames.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/types.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/parquet/parquet_dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/pipeline.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/sqlite/sqlite_dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/sqlite/sqlite_utilities.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/parquet_to_sqlite.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/random.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/string_selection_resolver.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/deployment/i3modules/graphnet_module.htmlhttps://graphnet-
team.github.io/graphnet_modules/graphnet/models/coarsening.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/components/layers.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/components/pool.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/detector/detector.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/detector/icecube.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/detector/prometheus.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/convnet.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/dynedge.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/dynedge_jinst.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/dynedge_kaggle_tito.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/gnn.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/edges/edges.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/graph_definition.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/graphs.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/nodes/nodes.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/model.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/standard_model.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/task/classification.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/task/reconstruction.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/task/task.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/utils.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/pisa/fitting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/pisa/plotting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/callbacks.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/labels.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/loss_functions.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/utils.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/weight_fitting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/argparse.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/base_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/configurable.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/dataset_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/model_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/parsing.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/training_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/filesys.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/imports.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/logging.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/maths.htmlhttps://graphnet-team.github.io/graphnet_modules/index.htmlhttps://graphnet-team.github.io/graphnetsearch.html
\ No newline at end of file
+https://graphnet-team.github.io/graphnetabout.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.constants.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.constants.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataloader.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.parquet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.parquet.parquet_dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.sqlite.sqlite_dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3extractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3featureextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3genericextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3hybridrecoextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3ntmuonlabelsextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3particleextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3pisaextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3quesoextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3retroextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3splinempeextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3truthextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3tumextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.collections.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.frames.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.types.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.parquet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.parquet.parquet_dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.pipeline.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.sqlite_dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.sqlite_utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.parquet_to_sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.random.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.string_selection_resolver.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.i3modules.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.i3modules.deployer.htmlhttps://graphnet-team.github.io/g
raphnetapi/graphnet.deployment.i3modules.graphnet_module.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.coarsening.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.layers.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.pool.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.detector.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.icecube.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.prometheus.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.convnet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge_jinst.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge_kaggle_tito.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.gnn.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.edges.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.graph_definition.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.graphs.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.nodes.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.nodes.nodes.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.model.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.standard_model.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.classification.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.reconstruction.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.task.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.fitting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.plotting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.callbacks.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.labels.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.loss_functions.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.weight_fitting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.argparse.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.base_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.configurable.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.dataset_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.model_config.htmlhttps://graphnet-team.github.io
/graphnetapi/graphnet.utilities.config.parsing.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.training_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.decorators.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.filesys.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.imports.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.logging.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.maths.htmlhttps://graphnet-team.github.io/graphnetapi/modules.htmlhttps://graphnet-team.github.io/graphnetcontribute.htmlhttps://graphnet-team.github.io/graphnetindex.htmlhttps://graphnet-team.github.io/graphnetinstall.htmlhttps://graphnet-team.github.io/graphnetgenindex.htmlhttps://graphnet-team.github.io/graphnetpy-modindex.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/constants.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3extractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3featureextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3genericextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3hybridrecoextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3ntmuonlabelsextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3particleextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3pisaextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3quesoextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3retroextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3splinempeextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3truthextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3tumextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/collections.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/frames.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/types.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/parquet/parquet_dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/sqlite/sqlite_dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/sqlite/sqlite_utilities.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/parquet_to_sqlite.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/random.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/string_selection_resolver.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/pisa/fitting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/pisa/plotting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/callbacks.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/weight_fitting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/argparse.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/base_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/u
tilities/config/configurable.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/dataset_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/model_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/parsing.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/training_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/filesys.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/imports.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/logging.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/maths.htmlhttps://graphnet-team.github.io/graphnet_modules/index.htmlhttps://graphnet-team.github.io/graphnetsearch.html
\ No newline at end of file