From 92855c8969503dc63bbb608c93add340ec4b6ebc Mon Sep 17 00:00:00 2001
From: Andreas Søgaard
Date: Mon, 5 Feb 2024 05:27:27 +0000
Subject: [PATCH] Deploying to gh-pages from @ Aske-Rosted/graphnet@b686dd72d5840b8e6621ac04e7abc3ac1e7ccadd 🚀
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 _modules/graphnet/data/dataloader.html        |  457 ------
 _modules/graphnet/data/dataset/dataset.html   | 1089 ------
 .../data/dataset/parquet/parquet_dataset.html |  500 ------
 .../data/dataset/sqlite/sqlite_dataset.html   |  515 ------
 _modules/graphnet/data/pipeline.html          |  593 ------
 .../deployment/i3modules/graphnet_module.html |  817 ------
 _modules/graphnet/models/coarsening.html      |  708 ------
 .../graphnet/models/components/layers.html    |  579 ------
 _modules/graphnet/models/components/pool.html |  656 ------
 _modules/graphnet/models/gnn/convnet.html     |  484 ------
 _modules/graphnet/models/gnn/dynedge.html     |  691 ------
 .../graphnet/models/gnn/dynedge_jinst.html    |  519 ------
 .../models/gnn/dynedge_kaggle_tito.html       |  633 ------
 _modules/graphnet/models/gnn/gnn.html         |  401 ------
 .../graphnet/models/graphs/edges/edges.html   |  559 ------
 .../models/graphs/edges/minkowski.html        |  464 ------
 .../models/graphs/graph_definition.html       |  815 ------
 _modules/graphnet/models/graphs/graphs.html   |  419 ------
 .../graphnet/models/graphs/nodes/nodes.html   |  591 ------
 _modules/graphnet/models/graphs/utils.html    |  532 ------
 _modules/graphnet/models/standard_model.html  |  912 ------
 .../graphnet/models/task/classification.html  |  411 ------
 .../graphnet/models/task/reconstruction.html  |  632 ------
 _modules/graphnet/models/task/task.html       |  844 ------
 _modules/graphnet/models/utils.html           |  430 ------
 _modules/graphnet/training/utils.html         |  697 ------
 _modules/index.html                           |   26 -
 api/graphnet.data.dataloader.html             |  127 +-
 api/graphnet.data.dataset.dataset.html        |  333 +---
 api/graphnet.data.dataset.html                |   14 +-
 api/graphnet.data.dataset.parquet.html        |   10 +-
 ....data.dataset.parquet.parquet_dataset.html |  119 +-
 api/graphnet.data.dataset.sqlite.html         |   10 +-
 ...et.data.dataset.sqlite.sqlite_dataset.html |  119 +-
 api/graphnet.data.html                        |   12 +-
 api/graphnet.data.pipeline.html               |   53 +-
 ...a.utilities.string_selection_resolver.html |    2 +-
 ....deployment.i3modules.graphnet_module.html |  133 +-
 api/graphnet.deployment.i3modules.html        |    7 +-
 api/graphnet.models.coarsening.html           |  233 +---
 api/graphnet.models.components.html           |   26 +-
 api/graphnet.models.components.layers.html    |  251 +--
 api/graphnet.models.components.pool.html      |  337 +---
 api/graphnet.models.gnn.convnet.html          |   79 +-
 api/graphnet.models.gnn.dynedge.html          |  101 +-
 api/graphnet.models.gnn.dynedge_jinst.html    |   76 +-
 ...aphnet.models.gnn.dynedge_kaggle_tito.html |   90 +-
 api/graphnet.models.gnn.gnn.html              |  104 +-
 api/graphnet.models.gnn.html                  |   30 +-
 api/graphnet.models.graphs.edges.edges.html   |  179 +--
 api/graphnet.models.graphs.edges.html         |   22 +-
 ...raphnet.models.graphs.edges.minkowski.html |   82 +-
 ...aphnet.models.graphs.graph_definition.html |  114 +-
 api/graphnet.models.graphs.graphs.html        |   59 +-
 api/graphnet.models.graphs.html               |   26 +-
 api/graphnet.models.graphs.nodes.html         |   15 +-
 api/graphnet.models.graphs.nodes.nodes.html   |  214 +--
 api/graphnet.models.graphs.utils.html         |  167 +--
 api/graphnet.models.html                      |   32 +-
 api/graphnet.models.standard_model.html       |  380 +---
 api/graphnet.models.task.classification.html  |  211 +--
 api/graphnet.models.task.html                 |   38 +-
 api/graphnet.models.task.reconstruction.html  | 1069 +----
 api/graphnet.models.task.task.html            |  559 +---
 api/graphnet.models.utils.html                |  108 +-
 api/graphnet.training.callbacks.html          |   16 +-
 api/graphnet.training.html                    |   10 +-
 api/graphnet.training.utils.html              |  215 +--
 ...graphnet.utilities.config.base_config.html |   19 +-
 ...phnet.utilities.config.dataset_config.html |   16 +
 ...raphnet.utilities.config.model_config.html |   16 +
 ...hnet.utilities.config.training_config.html |   19 +-
 genindex.html                                 |  800 +----
 objects.inv                                   |  Bin 6677 -> 4806 bytes
 py-modindex.html                              |  180 ---
 searchindex.js                                |    2 +-
 sitemap.xml                                   |    2 +-
 77 files changed, 212 insertions(+), 22598 deletions(-)
 delete mode 100644 _modules/graphnet/data/dataloader.html
 delete mode 100644 _modules/graphnet/data/dataset/dataset.html
 delete mode 100644 _modules/graphnet/data/dataset/parquet/parquet_dataset.html
 delete mode 100644 _modules/graphnet/data/dataset/sqlite/sqlite_dataset.html
 delete mode 100644 _modules/graphnet/data/pipeline.html
 delete mode 100644 _modules/graphnet/deployment/i3modules/graphnet_module.html
 delete mode 100644 _modules/graphnet/models/coarsening.html
 delete mode 100644 _modules/graphnet/models/components/layers.html
 delete mode 100644 _modules/graphnet/models/components/pool.html
 delete mode 100644 _modules/graphnet/models/gnn/convnet.html
 delete mode 100644 _modules/graphnet/models/gnn/dynedge.html
 delete mode 100644 _modules/graphnet/models/gnn/dynedge_jinst.html
 delete mode 100644 _modules/graphnet/models/gnn/dynedge_kaggle_tito.html
 delete mode 100644 _modules/graphnet/models/gnn/gnn.html
 delete mode 100644 _modules/graphnet/models/graphs/edges/edges.html
 delete mode 100644 _modules/graphnet/models/graphs/edges/minkowski.html
 delete mode 100644 _modules/graphnet/models/graphs/graph_definition.html
 delete mode 100644 _modules/graphnet/models/graphs/graphs.html
 delete mode 100644 _modules/graphnet/models/graphs/nodes/nodes.html
 delete mode 100644 _modules/graphnet/models/graphs/utils.html
 delete mode 100644 _modules/graphnet/models/standard_model.html
 delete mode 100644 _modules/graphnet/models/task/classification.html
 delete mode 100644 _modules/graphnet/models/task/reconstruction.html
 delete mode 100644 _modules/graphnet/models/task/task.html
 delete mode 100644 _modules/graphnet/models/utils.html
 delete mode 100644 _modules/graphnet/training/utils.html

diff --git a/_modules/graphnet/data/dataloader.html b/_modules/graphnet/data/dataloader.html
deleted file mode 100644
index 6aa911562..000000000
--- a/_modules/graphnet/data/dataloader.html
+++ /dev/null
@@ -1,457 +0,0 @@
Source code for graphnet.data.dataloader

-"""Base `Dataloader` class(es) used in `graphnet`."""
-
-from typing import Any, Callable, Dict, List, Union
-
-import torch.utils.data
-from torch_geometric.data import Batch, Data
-
-from graphnet.data.dataset import Dataset
-from graphnet.utilities.config import DatasetConfig
-
-
-
-[docs] -def collate_fn(graphs: List[Data]) -> Batch: - """Remove graphs with less than two DOM hits. - - Should not occur in "production. - """ - graphs = [g for g in graphs if g.n_pulses > 1] - return Batch.from_data_list(graphs)
- - - -
-[docs] -def do_shuffle(selection_name: str) -> bool: - """Check whether to shuffle selection with name `selection_name`.""" - return "train" in selection_name.lower()
- - - -
-[docs] -class DataLoader(torch.utils.data.DataLoader): - """Class for loading data from a `Dataset`.""" - - def __init__( - self, - dataset: Dataset, - batch_size: int = 1, - shuffle: bool = False, - num_workers: int = 10, - persistent_workers: bool = True, - collate_fn: Callable = collate_fn, - prefetch_factor: int = 2, - **kwargs: Any, - ) -> None: - """Construct `DataLoader`.""" - # Base class constructor - super().__init__( - dataset, - batch_size=batch_size, - shuffle=shuffle, - num_workers=num_workers, - collate_fn=collate_fn, - persistent_workers=persistent_workers, - prefetch_factor=prefetch_factor, - **kwargs, - ) - -
-[docs] - @classmethod - def from_dataset_config( - cls, - config: DatasetConfig, - **kwargs: Any, - ) -> Union["DataLoader", Dict[str, "DataLoader"]]: - """Construct `DataLoader`s based on selections in `DatasetConfig`.""" - if isinstance(config.selection, dict): - assert "shuffle" not in kwargs, ( - "When passing a `DatasetConfig` with multiple selections, " - "`shuffle` is automatically inferred from the selection name, " - "and thus should not specified as an argument." - ) - datasets = Dataset.from_config(config) - assert isinstance(datasets, dict) - data_loaders: Dict[str, DataLoader] = {} - for name, dataset in datasets.items(): - data_loaders[name] = cls( - dataset, - shuffle=do_shuffle(name), - **kwargs, - ) - - return data_loaders - - else: - assert "shuffle" in kwargs, ( - "When passing a `DatasetConfig` with a single selections, you " - "need to specify `shuffle` as an argument." - ) - dataset = Dataset.from_config(config) - assert isinstance(dataset, Dataset) - return cls(dataset, **kwargs)
-
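A minimal usage sketch of the class above (the config path and the selection names "train"/"validation" are hypothetical): when the config defines a dict of selections, `from_dataset_config` returns one loader per named selection, shuffling only those whose name contains "train", as implemented in `do_shuffle`.

from graphnet.data.dataloader import DataLoader
from graphnet.utilities.config import DatasetConfig

# Hypothetical config file defining a dict of selections, e.g. "train"/"validation".
config = DatasetConfig.load("configs/dataset.yml")

# One DataLoader per named selection; `shuffle` is inferred from each name.
loaders = DataLoader.from_dataset_config(config, batch_size=128, num_workers=4)
train_loader = loaders["train"]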
diff --git a/_modules/graphnet/data/dataset/dataset.html b/_modules/graphnet/data/dataset/dataset.html
deleted file mode 100644
index 442af1a79..000000000
--- a/_modules/graphnet/data/dataset/dataset.html
+++ /dev/null
@@ -1,1089 +0,0 @@
Source code for graphnet.data.dataset.dataset

-"""Base :py:class:`Dataset` class(es) used in GraphNeT."""
-
-from copy import deepcopy
-from abc import ABC, abstractmethod
-from typing import (
-    cast,
-    Any,
-    Callable,
-    Dict,
-    List,
-    Optional,
-    Tuple,
-    Union,
-    Iterable,
-    Type,
-)
-
-import numpy as np
-import torch
-from torch_geometric.data import Data
-
-from graphnet.constants import GRAPHNET_ROOT_DIR
-from graphnet.data.utilities.string_selection_resolver import (
-    StringSelectionResolver,
-)
-from graphnet.training.labels import Label
-from graphnet.utilities.config import (
-    Configurable,
-    DatasetConfig,
-    DatasetConfigSaverABCMeta,
-)
-from graphnet.utilities.config.parsing import traverse_and_apply
-from graphnet.utilities.logging import Logger
-from graphnet.models.graphs import GraphDefinition
-
-from graphnet.utilities.config.parsing import (
-    get_all_grapnet_classes,
-)
-
-
-
-[docs] -class ColumnMissingException(Exception): - """Exception to indicate a missing column in a dataset."""
- - - -
-[docs] -def load_module(class_name: str) -> Type: - """Load graphnet module from string name. - - Args: - class_name: name of class - - Returns: - graphnet module. - """ - # Get a lookup for all classes in `graphnet` - import graphnet.data - import graphnet.models - import graphnet.training - - namespace_classes = get_all_grapnet_classes( - graphnet.data, graphnet.models, graphnet.training - ) - return namespace_classes[class_name]
- - - -
-[docs] -def parse_graph_definition(cfg: dict) -> GraphDefinition: - """Construct GraphDefinition from DatasetConfig.""" - assert cfg["graph_definition"] is not None - - args = cfg["graph_definition"]["arguments"] - classes = {} - for arg in args.keys(): - if isinstance(args[arg], dict): - if "class_name" in args[arg].keys(): - classes[arg] = load_module(args[arg]["class_name"])( - **args[arg]["arguments"] - ) - if arg == "dtype": - args[arg] = eval(args[arg]) # converts string to class - - new_cfg = deepcopy(args) - new_cfg.update(classes) - graph_definition = load_module(cfg["graph_definition"]["class_name"])( - **new_cfg - ) - return graph_definition
- - - -
-[docs] -class Dataset( - Logger, - Configurable, - torch.utils.data.Dataset, - ABC, - metaclass=DatasetConfigSaverABCMeta, -): - """Base Dataset class for reading from any intermediate file format.""" - - # Class method(s) -
-[docs] - @classmethod - def from_config( # type: ignore[override] - cls, - source: Union[DatasetConfig, str], - ) -> Union[ - "Dataset", - "EnsembleDataset", - Dict[str, "Dataset"], - Dict[str, "EnsembleDataset"], - ]: - """Construct `Dataset` instance from `source` configuration.""" - if isinstance(source, str): - source = DatasetConfig.load(source) - - assert isinstance(source, DatasetConfig), ( - f"Argument `source` of type ({type(source)}) is not a " - "`DatasetConfig`" - ) - - assert ( - "graph_definition" in source.dict().keys() - ), "`DatasetConfig` incompatible with current GraphNeT version." - - # Parse set of `selection``. - if isinstance(source.selection, dict): - return cls._construct_datasets_from_dict(source) - elif ( - isinstance(source.selection, list) - and len(source.selection) - and isinstance(source.selection[0], str) - ): - return cls._construct_dataset_from_list_of_strings(source) - - cfg = source.dict() - if cfg["graph_definition"] is not None: - cfg["graph_definition"] = parse_graph_definition(cfg) - return source._dataset_class(**cfg)
- - -
-[docs] - @classmethod - def concatenate( - cls, - datasets: List["Dataset"], - ) -> "EnsembleDataset": - """Concatenate multiple `Dataset`s into one instance.""" - return EnsembleDataset(datasets)
- - - @classmethod - def _construct_datasets_from_dict( - cls, config: DatasetConfig - ) -> Dict[str, "Dataset"]: - """Construct `Dataset` for each entry in dict `self.selection`.""" - assert isinstance(config.selection, dict) - datasets: Dict[str, "Dataset"] = {} - selections: Dict[str, Union[str, List]] = deepcopy(config.selection) - for key, selection in selections.items(): - config.selection = selection - dataset = Dataset.from_config(config) - assert isinstance(dataset, (Dataset, EnsembleDataset)) - datasets[key] = dataset - - # Reset `selections`. - config.selection = selections - - return datasets - - @classmethod - def _construct_dataset_from_list_of_strings( - cls, config: DatasetConfig - ) -> "Dataset": - """Construct `Dataset` for each entry in list `self.selection`.""" - assert isinstance(config.selection, list) - datasets: List["Dataset"] = [] - selections: List[str] = deepcopy(cast(List[str], config.selection)) - for selection in selections: - config.selection = selection - dataset = Dataset.from_config(config) - assert isinstance(dataset, Dataset) - datasets.append(dataset) - - # Reset `selections`. - config.selection = selections - - return cls.concatenate(datasets) - - @classmethod - def _resolve_graphnet_paths( - cls, path: Union[str, List[str]] - ) -> Union[str, List[str]]: - if isinstance(path, list): - return [cast(str, cls._resolve_graphnet_paths(p)) for p in path] - - assert isinstance(path, str) - return ( - path.replace("$graphnet", GRAPHNET_ROOT_DIR) - .replace("$GRAPHNET", GRAPHNET_ROOT_DIR) - .replace("${graphnet}", GRAPHNET_ROOT_DIR) - .replace("${GRAPHNET}", GRAPHNET_ROOT_DIR) - ) - - def __init__( - self, - path: Union[str, List[str]], - graph_definition: GraphDefinition, - pulsemaps: Union[str, List[str]], - features: List[str], - truth: List[str], - *, - node_truth: Optional[List[str]] = None, - index_column: str = "event_no", - truth_table: str = "truth", - node_truth_table: Optional[str] = None, - string_selection: Optional[List[int]] = None, - selection: Optional[Union[str, List[int], List[List[int]]]] = None, - dtype: torch.dtype = torch.float32, - loss_weight_table: Optional[str] = None, - loss_weight_column: Optional[str] = None, - loss_weight_default_value: Optional[float] = None, - seed: Optional[int] = None, - ): - """Construct Dataset. - - Args: - path: Path to the file(s) from which this `Dataset` should read. - pulsemaps: Name(s) of the pulse map series that should be used to - construct the nodes on the individual graph objects, and their - features. Multiple pulse series maps can be used, e.g., when - different DOM types are stored in different maps. - features: List of columns in the input files that should be used as - node features on the graph objects. - truth: List of event-level columns in the input files that should - be used added as attributes on the graph objects. - node_truth: List of node-level columns in the input files that - should be used added as attributes on the graph objects. - index_column: Name of the column in the input files that contains - unique indicies to identify and map events across tables. - truth_table: Name of the table containing event-level truth - information. - node_truth_table: Name of the table containing node-level truth - information. - string_selection: Subset of strings for which data should be read - and used to construct graph objects. Defaults to None, meaning - all strings for which data exists are used. - selection: The events that should be read. 
This can be given either - as list of indicies (in `index_column`); or a string-based - selection used to query the `Dataset` for events passing the - selection. Defaults to None, meaning that all events in the - input files are read. - dtype: Type of the feature tensor on the graph objects returned. - loss_weight_table: Name of the table containing per-event loss - weights. - loss_weight_column: Name of the column in `loss_weight_table` - containing per-event loss weights. This is also the name of the - corresponding attribute assigned to the graph object. - loss_weight_default_value: Default per-event loss weight. - NOTE: This default value is only applied when - `loss_weight_table` and `loss_weight_column` are specified, and - in this case to events with no value in the corresponding - table/column. That is, if no per-event loss weight table/column - is provided, this value is ignored. Defaults to None. - seed: Random number generator seed, used for selecting a random - subset of events when resolving a string-based selection (e.g., - `"10000 random events ~ event_no % 5 > 0"` or `"20% random - events ~ event_no % 5 > 0"`). - graph_definition: Method that defines the graph representation. - """ - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - # Check(s) - if isinstance(pulsemaps, str): - pulsemaps = [pulsemaps] - - assert isinstance(features, (list, tuple)) - assert isinstance(truth, (list, tuple)) - - # Resolve reference to `$GRAPHNET` in path(s) - path = self._resolve_graphnet_paths(path) - - # Member variable(s) - self._path = path - self._selection = None - self._pulsemaps = pulsemaps - self._features = [index_column] + features - self._truth = [index_column] + truth - self._index_column = index_column - self._truth_table = truth_table - self._loss_weight_default_value = loss_weight_default_value - self._graph_definition = deepcopy(graph_definition) - - if node_truth is not None: - assert isinstance(node_truth_table, str) - if isinstance(node_truth, str): - node_truth = [node_truth] - - self._node_truth = node_truth - self._node_truth_table = node_truth_table - - if string_selection is not None: - self.warning( - ( - "String selection detected.\n " - f"Accepted strings: {string_selection}\n " - "All other strings are ignored!" - ) - ) - if isinstance(string_selection, int): - string_selection = [string_selection] - - self._string_selection = string_selection - - self._selection = None - if self._string_selection: - self._selection = f"string in {str(tuple(self._string_selection))}" - - self._loss_weight_column = loss_weight_column - self._loss_weight_table = loss_weight_table - if (self._loss_weight_table is None) and ( - self._loss_weight_column is not None - ): - self.warning("Error: no loss weight table specified") - assert isinstance(self._loss_weight_table, str) - if (self._loss_weight_table is not None) and ( - self._loss_weight_column is None - ): - self.warning("Error: no loss weight column specified") - assert isinstance(self._loss_weight_column, str) - - self._dtype = dtype - - self._label_fns: Dict[str, Callable[[Data], Any]] = {} - - self._string_selection_resolver = StringSelectionResolver( - self, - index_column=index_column, - seed=seed, - ) - - # Implementation-specific initialisation. 
- self._init() - - # Set unique indices - self._indices: Union[List[int], List[List[int]]] - if selection is None: - self._indices = self._get_all_indices() - elif isinstance(selection, str): - self._indices = self._resolve_string_selection_to_indices( - selection - ) - else: - self._indices = selection - - # Purely internal member variables - self._missing_variables: Dict[str, List[str]] = {} - self._remove_missing_columns() - - # Implementation-specific post-init code. - self._post_init() - - # Properties - @property - def path(self) -> Union[str, List[str]]: - """Path to the file(s) from which this `Dataset` reads.""" - return self._path - - @property - def truth_table(self) -> str: - """Name of the table containing event-level truth information.""" - return self._truth_table - - # Abstract method(s) - @abstractmethod - def _init(self) -> None: - """Set internal representation needed to read data from input file.""" - - def _post_init(self) -> None: - """Implementation-specific code executed after the main constructor.""" - - @abstractmethod - def _get_all_indices(self) -> List[int]: - """Return a list of all unique values in `self._index_column`.""" - - @abstractmethod - def _get_event_index( - self, sequential_index: Optional[int] - ) -> Optional[int]: - """Return the event index corresponding to a `sequential_index`.""" - -
-[docs] - @abstractmethod - def query_table( - self, - table: str, - columns: Union[List[str], str], - sequential_index: Optional[int] = None, - selection: Optional[str] = None, - ) -> List[Tuple[Any, ...]]: - """Query a table at a specific index, optionally with some selection. - - Args: - table: Table to be queried. - columns: Columns to read out. - sequential_index: Sequentially numbered index - (i.e. in [0,len(self))) of the event to query. This _may_ - differ from the indexation used in `self._indices`. If no value - is provided, the entire column is returned. - selection: Selection to be imposed before reading out data. - Defaults to None. - - Returns: - List of tuples containing the values in `columns`. If the `table` - contains only scalar data for `columns`, a list of length 1 is - returned - - Raises: - ColumnMissingException: If one or more element in `columns` is not - present in `table`. - """
- - - # Public method(s) -
-[docs] - def add_label( - self, fn: Callable[[Data], Any], key: Optional[str] = None - ) -> None: - """Add custom graph label define using function `fn`.""" - if isinstance(fn, Label): - key = fn.key - assert isinstance( - key, str - ), "Please specify a key for the custom label to be added." - assert ( - key not in self._label_fns - ), f"A custom label {key} has already been defined." - self._label_fns[key] = fn
- - - def __len__(self) -> int: - """Return number of graphs in `Dataset`.""" - return len(self._indices) - - def __getitem__(self, sequential_index: int) -> Data: - """Return graph `Data` object at `index`.""" - if not (0 <= sequential_index < len(self)): - raise IndexError( - f"Index {sequential_index} not in range [0, {len(self) - 1}]" - ) - features, truth, node_truth, loss_weight = self._query( - sequential_index - ) - graph = self._create_graph(features, truth, node_truth, loss_weight) - return graph - - # Internal method(s) - def _resolve_string_selection_to_indices( - self, selection: str - ) -> List[int]: - """Resolve selection as string to list of indices. - - Selections are expected to have pandas.DataFrame.query-compatible - syntax, e.g., ``` "event_no % 5 > 0" ``` Selections may also specify a - fixed number of events to randomly sample, e.g., ``` "10000 random - events ~ event_no % 5 > 0" "20% random events ~ event_no % 5 > 0" ``` - """ - return self._string_selection_resolver.resolve(selection) - - def _remove_missing_columns(self) -> None: - """Remove columns that are not present in the input file. - - Columns are removed from `self._features` and `self._truth`. - """ - # Check if table is completely empty - if len(self) == 0: - self.warning("Dataset is empty.") - return - - # Find missing features - missing_features_set = set(self._features) - for pulsemap in self._pulsemaps: - missing = self._check_missing_columns(self._features, pulsemap) - missing_features_set = missing_features_set.intersection(missing) - - missing_features = list(missing_features_set) - - # Find missing truth variables - missing_truth_variables = self._check_missing_columns( - self._truth, self._truth_table - ) - - # Remove missing features - if missing_features: - self.warning( - "Removing the following (missing) features: " - + ", ".join(missing_features) - ) - for missing_feature in missing_features: - self._features.remove(missing_feature) - - # Remove missing truth variables - if missing_truth_variables: - self.warning( - ( - "Removing the following (missing) truth variables: " - + ", ".join(missing_truth_variables) - ) - ) - for missing_truth_variable in missing_truth_variables: - self._truth.remove(missing_truth_variable) - - def _check_missing_columns( - self, - columns: List[str], - table: str, - ) -> List[str]: - """Return a list missing columns in `table`.""" - for column in columns: - try: - self.query_table(table, [column], 0) - except ColumnMissingException: - if table not in self._missing_variables: - self._missing_variables[table] = [] - self._missing_variables[table].append(column) - except IndexError: - self.warning(f"Dataset contains no entries for {column}") - - return self._missing_variables.get(table, []) - - def _query( - self, sequential_index: int - ) -> Tuple[ - List[Tuple[float, ...]], - Tuple[Any, ...], - Optional[List[Tuple[Any, ...]]], - Optional[float], - ]: - """Query file for event features and truth information. - - The returned lists have lengths corresponding to the number of pulses - in the event. Their constituent tuples have lengths corresponding to - the number of features/attributes in each output - - Args: - sequential_index: Sequentially numbered index - (i.e. in [0,len(self))) of the event to query. This _may_ - differ from the indexation used in `self._indices`. - - Returns: - Tuple containing pulse-level event features; event-level truth - information; pulse-level truth information; and event-level - loss weights, respectively. 
- """ - features = [] - for pulsemap in self._pulsemaps: - features_pulsemap = self.query_table( - pulsemap, self._features, sequential_index, self._selection - ) - features.extend(features_pulsemap) - - truth: Tuple[Any, ...] = self.query_table( - self._truth_table, self._truth, sequential_index - )[0] - if self._node_truth: - assert self._node_truth_table is not None - node_truth = self.query_table( - self._node_truth_table, - self._node_truth, - sequential_index, - self._selection, - ) - else: - node_truth = None - - loss_weight: Optional[float] = None # Default - if self._loss_weight_column is not None: - assert self._loss_weight_table is not None - loss_weight_list = self.query_table( - self._loss_weight_table, - self._loss_weight_column, - sequential_index, - ) - if len(loss_weight_list): - loss_weight = loss_weight_list[0][0] - else: - loss_weight = -1.0 - - return features, truth, node_truth, loss_weight - - def _create_graph( - self, - features: List[Tuple[float, ...]], - truth: Tuple[Any, ...], - node_truth: Optional[List[Tuple[Any, ...]]] = None, - loss_weight: Optional[float] = None, - ) -> Data: - """Create Pytorch Data (i.e. graph) object. - - Args: - features: List of tuples, containing event features. - truth: List of tuples, containing truth information. - node_truth: List of tuples, containing node-level truth. - loss_weight: A weight associated with the event for weighing the - loss. - - Returns: - Graph object. - """ - # Convert nested list to simple dict - truth_dict = { - key: truth[index] for index, key in enumerate(self._truth) - } - - # Define custom labels - labels_dict = self._get_labels(truth_dict) - - # Convert nested list to simple dict - if node_truth is not None: - node_truth_array = np.asarray(node_truth) - assert self._node_truth is not None - node_truth_dict = { - key: node_truth_array[:, index] - for index, key in enumerate(self._node_truth) - } - - # Create list of truth dicts with labels - truth_dicts = [labels_dict, truth_dict] - if node_truth is not None: - truth_dicts.append(node_truth_dict) - - # Catch cases with no reconstructed pulses - if len(features): - node_features = np.asarray(features)[ - :, 1: - ] # first entry is index column - else: - node_features = np.array([]).reshape((0, len(self._features) - 1)) - - # Construct graph data object - assert self._graph_definition is not None - graph = self._graph_definition( - input_features=node_features, - input_feature_names=self._features[ - 1: - ], # first entry is index column - truth_dicts=truth_dicts, - custom_label_functions=self._label_fns, - loss_weight_column=self._loss_weight_column, - loss_weight=loss_weight, - loss_weight_default_value=self._loss_weight_default_value, - data_path=self._path, - ) - return graph - - def _get_labels(self, truth_dict: Dict[str, Any]) -> Dict[str, Any]: - """Return dictionary of labels, to be added as graph attributes.""" - if "pid" in truth_dict.keys(): - abs_pid = abs(truth_dict["pid"]) - sim_type = truth_dict["sim_type"] - - labels_dict = { - self._index_column: truth_dict[self._index_column], - "muon": int(abs_pid == 13), - "muon_stopped": int(truth_dict.get("stopped_muon") == 1), - "noise": int((abs_pid == 1) & (sim_type != "data")), - "neutrino": int( - (abs_pid != 13) & (abs_pid != 1) - ), # @TODO: `abs_pid in [12,14,16]`? 
- "v_e": int(abs_pid == 12), - "v_u": int(abs_pid == 14), - "v_t": int(abs_pid == 16), - "track": int( - (abs_pid == 14) & (truth_dict["interaction_type"] == 1) - ), - "dbang": self._get_dbang_label(truth_dict), - "corsika": int(abs_pid > 20), - } - else: - labels_dict = { - self._index_column: truth_dict[self._index_column], - "muon": -1, - "muon_stopped": -1, - "noise": -1, - "neutrino": -1, - "v_e": -1, - "v_u": -1, - "v_t": -1, - "track": -1, - "dbang": -1, - "corsika": -1, - } - return labels_dict - - def _get_dbang_label(self, truth_dict: Dict[str, Any]) -> int: - """Get label for double-bang classification.""" - try: - label = int(truth_dict["dbang_decay_length"] > -1) - return label - except KeyError: - return -1
- - - -
-[docs] -class EnsembleDataset(torch.utils.data.ConcatDataset): - """Construct a single dataset from a collection of datasets.""" - - def __init__(self, datasets: Iterable[Dataset]) -> None: - """Construct a single dataset from a collection of datasets. - - Args: - datasets: A collection of Datasets - """ - super().__init__(datasets=datasets)
- -
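A sketch of driving the `Dataset` API above, using the string-based selection syntax documented in `_resolve_string_selection_to_indices`; the config path is a placeholder.

from graphnet.data.dataset import Dataset
from graphnet.utilities.config import DatasetConfig

config = DatasetConfig.load("configs/dataset.yml")  # hypothetical path

# pandas.DataFrame.query-style selection with an optional random-sampling prefix.
config.selection = "10000 random events ~ event_no % 5 > 0"

dataset = Dataset.from_config(config)
graph = dataset[0]  # `Data` object built by the configured `GraphDefinition`
print(len(dataset), graph.n_pulses)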
diff --git a/_modules/graphnet/data/dataset/parquet/parquet_dataset.html b/_modules/graphnet/data/dataset/parquet/parquet_dataset.html
deleted file mode 100644
index 10827b1ba..000000000
--- a/_modules/graphnet/data/dataset/parquet/parquet_dataset.html
+++ /dev/null
@@ -1,500 +0,0 @@
Source code for graphnet.data.dataset.parquet.parquet_dataset

-"""`Dataset` class(es) for reading from Parquet files."""
-
-from typing import Any, Dict, List, Optional, Tuple, Union, cast
-
-import numpy as np
-import awkward as ak
-
-from graphnet.data.dataset.dataset import Dataset, ColumnMissingException
-
-
-
-[docs] -class ParquetDataset(Dataset): - """Pytorch dataset for reading from Parquet files.""" - - # Implementing abstract method(s) - def _init(self) -> None: - # Check(s) - if not isinstance(self._path, list): - - assert isinstance(self._path, str) - - assert self._path.endswith( - ".parquet" - ), f"Format of input file `{self._path}` is not supported" - - assert ( - self._node_truth is None - ), "Argument `node_truth` is currently not supported." - assert ( - self._node_truth_table is None - ), "Argument `node_truth_table` is currently not supported." - assert ( - self._string_selection is None - ), "Argument `string_selection` is currently not supported" - - # Set custom member variable(s) - if not isinstance(self._path, list): - self._parquet_hook = ak.from_parquet(self._path, lazy=False) - else: - self._parquet_hook = ak.concatenate( - ak.from_parquet(file) for file in self._path - ) - - def _get_all_indices(self) -> List[int]: - return np.arange( - len( - ak.to_numpy( - self._parquet_hook[self._truth_table][self._index_column] - ).tolist() - ) - ).tolist() - - def _get_event_index( - self, sequential_index: Optional[int] - ) -> Optional[int]: - index: Optional[int] - if sequential_index is None: - index = None - else: - index = cast(List[int], self._indices)[sequential_index] - - return index - - def _format_dictionary_result( - self, dictionary: Dict - ) -> List[Tuple[Any, ...]]: - """Convert the output of `ak.to_list()` into a list of tuples.""" - # All scalar values - if all(map(np.isscalar, dictionary.values())): - return [tuple(dictionary.values())] - - # All arrays should have same length - array_lengths = [ - len(values) - for values in dictionary.values() - if not np.isscalar(values) - ] - assert len(set(array_lengths)) == 1, ( - f"Arrays in {dictionary} have differing lengths " - f"({set(array_lengths)})." - ) - nb_elements = array_lengths[0] - - # Broadcast scalars - for key in dictionary: - value = dictionary[key] - if np.isscalar(value): - dictionary[key] = np.repeat( - value, repeats=nb_elements - ).tolist() - - return list(map(tuple, list(zip(*dictionary.values())))) - -
-[docs] - def query_table( - self, - table: str, - columns: Union[List[str], str], - sequential_index: Optional[int] = None, - selection: Optional[str] = None, - ) -> List[Tuple[Any, ...]]: - """Query table at a specific index, optionally with some selection.""" - # Check(s) - assert ( - selection is None - ), "Argument `selection` is currently not supported" - - index = self._get_event_index(sequential_index) - - try: - if index is None: - ak_array = self._parquet_hook[table][columns][:] - else: - ak_array = self._parquet_hook[table][columns][index] - except ValueError as e: - if "does not exist (not in record)" in str(e): - raise ColumnMissingException(str(e)) - else: - raise e - - output = ak_array.to_list() - - result: List[Tuple[Any, ...]] = [] - - # Querying single index - if isinstance(output, dict): - assert list(output.keys()) == columns - result = self._format_dictionary_result(output) - - # Querying entire columm - elif isinstance(output, list): - for dictionary in output: - assert list(dictionary.keys()) == columns - result.extend(self._format_dictionary_result(dictionary)) - - return result
-
- -
- -
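To illustrate the `query_table` contract above (the table and column names are hypothetical): scalar, event-level data yields a single tuple, while pulse-level data yields one tuple per pulse, with scalars broadcast by `_format_dictionary_result`.

# Assuming `dataset` is a constructed `ParquetDataset` (see `Dataset.__init__`).
rows = dataset.query_table(
    table="truth",
    columns=["energy", "zenith"],  # hypothetical event-level columns
    sequential_index=0,
)
assert len(rows) == 1  # scalar (event-level) columns -> list of length 1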
diff --git a/_modules/graphnet/data/dataset/sqlite/sqlite_dataset.html b/_modules/graphnet/data/dataset/sqlite/sqlite_dataset.html
deleted file mode 100644
index ef174366f..000000000
--- a/_modules/graphnet/data/dataset/sqlite/sqlite_dataset.html
+++ /dev/null
@@ -1,515 +0,0 @@
Source code for graphnet.data.dataset.sqlite.sqlite_dataset

-"""`Dataset` class(es) for reading data from SQLite databases."""
-
-from typing import Any, List, Optional, Tuple, Union
-import pandas as pd
-import sqlite3
-
-from graphnet.data.dataset.dataset import Dataset, ColumnMissingException
-
-
-
-[docs] -class SQLiteDataset(Dataset): - """Pytorch dataset for reading data from SQLite databases.""" - - # Implementing abstract method(s) - def _init(self) -> None: - # Check(s) - self._database_list: Optional[List[str]] - if isinstance(self._path, list): - self._database_list = self._path - self._all_connections_established = False - self._all_connections: List[sqlite3.Connection] = [] - else: - self._database_list = None - assert isinstance(self._path, str) - assert self._path.endswith( - ".db" - ), f"Format of input file `{self._path}` is not supported." - - if self._database_list is not None: - self._current_database: Optional[int] = None - - # Set custom member variable(s) - self._features_string = ", ".join(self._features) - self._truth_string = ", ".join(self._truth) - if self._node_truth: - self._node_truth_string = ", ".join(self._node_truth) - - self._conn: Optional[sqlite3.Connection] = None - - def _post_init(self) -> None: - self._close_connection() - -
-[docs] - def query_table( - self, - table: str, - columns: Union[List[str], str], - sequential_index: Optional[int] = None, - selection: Optional[str] = None, - ) -> List[Tuple[Any, ...]]: - """Query table at a specific index, optionally with some selection.""" - # Check(s) - if isinstance(columns, list): - columns = ", ".join(columns) - - if not selection: # I.e., `None` or `""` - selection = "1=1" # Identically true, to select all - - index = self._get_event_index(sequential_index) - - # Query table - assert index is not None - self._establish_connection(index) - try: - assert self._conn - if sequential_index is None: - combined_selections = selection - else: - combined_selections = ( - f"{self._index_column} = {index} and {selection}" - ) - - result = self._conn.execute( - f"SELECT {columns} FROM {table} WHERE " - f"{combined_selections}" - ).fetchall() - except sqlite3.OperationalError as e: - if "no such column" in str(e): - raise ColumnMissingException(str(e)) - else: - raise e - return result
- - - def _get_all_indices(self) -> List[int]: - self._establish_connection(0) - indices = pd.read_sql_query( - f"SELECT {self._index_column} FROM {self._truth_table}", self._conn - ) - self._close_connection() - return indices.values.ravel().tolist() - - def _get_event_index( - self, sequential_index: Optional[int] - ) -> Optional[int]: - index: int = 0 - if sequential_index is not None: - index_ = self._indices[sequential_index] - if self._database_list is None: - assert isinstance(index_, int) - index = index_ - else: - assert isinstance(index_, list) - index = index_[0] - return index - - # Custom, internal method(s) - # @TODO: Is it necessary to return anything here? - def _establish_connection(self, i: int) -> "SQLiteDataset": - """Make sure that a sqlite3 connection is open.""" - if self._database_list is None: - assert isinstance(self._path, str) - if self._conn is None: - self._conn = sqlite3.connect(self._path) - else: - indices = self._indices[i] - assert isinstance(indices, list) - if self._conn is None: - if self._all_connections_established is False: - self._all_connections = [] - for database in self._database_list: - con = sqlite3.connect(database) - self._all_connections.append(con) - self._all_connections_established = True - self._conn = self._all_connections[indices[1]] - if indices[1] != self._current_database: - self._conn = self._all_connections[indices[1]] - self._current_database = indices[1] - return self - - # @TODO: Is it necessary to return anything here? - def _close_connection(self) -> "SQLiteDataset": - """Make sure that no sqlite3 connection is open. - - This is necessary to calls this before passing to - `torch.DataLoader` such that the dataset replica on each worker - is required to create its own connection (thereby avoiding - `sqlite3.DatabaseError: database disk image is malformed` errors - due to inability to use sqlite3 connection accross processes. - """ - if self._conn is not None: - self._conn.close() - del self._conn - self._conn = None - if self._database_list is not None: - if self._all_connections_established: - for con in self._all_connections: - con.close() - del self._all_connections - self._all_connections_established = False - self._conn = None - return self
- -
- -
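The connection handling above is what makes `SQLiteDataset` safe to use with multi-worker dataloading: `_post_init` closes the constructor's connection, and each worker re-opens its own on first query. A minimal sketch, assuming `dataset` is an already-constructed `SQLiteDataset`:

from graphnet.data.dataloader import DataLoader

# Each worker process lazily establishes its own sqlite3 connection,
# avoiding "database disk image is malformed" errors from shared handles.
loader = DataLoader(dataset, batch_size=64, num_workers=4)
for batch in loader:
    ...  # training/inference step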
diff --git a/_modules/graphnet/data/pipeline.html b/_modules/graphnet/data/pipeline.html
deleted file mode 100644
index 9ed962b05..000000000
--- a/_modules/graphnet/data/pipeline.html
+++ /dev/null
@@ -1,593 +0,0 @@
Source code for graphnet.data.pipeline

-"""Class(es) used for analysis in PISA."""
-
-from abc import ABC
-import dill
-from functools import reduce
-import os
-from typing import Dict, List, Optional, Tuple
-
-import numpy as np
-import pandas as pd
-from pytorch_lightning import Trainer
-import sqlite3
-import torch
-from torch.utils.data import DataLoader
-
-from graphnet.data.sqlite.sqlite_utilities import create_table_and_save_to_sql
-from graphnet.training.utils import get_predictions, make_dataloader
-from graphnet.models.graphs import GraphDefinition
-
-from graphnet.utilities.logging import Logger
-
-
-
-[docs] -class InSQLitePipeline(ABC, Logger): - """Create a SQLite database for PISA analysis. - - The database will contain truth and GNN predictions and, if available, - RETRO reconstructions. - """ - - def __init__( - self, - module_dict: Dict, - features: List[str], - truth: List[str], - device: torch.device, - retro_table_name: str = "retro", - outdir: Optional[str] = None, - batch_size: int = 100, - n_workers: int = 10, - pipeline_name: str = "pipeline", - ): - """Initialise the pipeline. - - Args: - module_dict: A dictionary with GNN modules from GraphNet. E.g. - {'energy': gnn_module_for_energy_regression} - features: List of input features for the GNN modules. - truth: List of truth for the GNN ModuleList. - device: The device used for computation. - retro_table_name: Name of the retro table for. - outdir: the directory in which the pipeline database will be - stored. - batch_size: Batch size for inference. - n_workers: Number of workers used in dataloading. - pipeline_name: Name of the pipeline. If such a pipeline already - exists, an error will be prompted to avoid overwriting. - """ - self._pipeline_name = pipeline_name - self._device = device - self.n_workers = n_workers - self._features = features - self._truth = truth - self._batch_size = batch_size - self._outdir = outdir - self._module_dict = module_dict - self._retro_table_name = retro_table_name - - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - def __call__( - self, - database: str, - pulsemap: str, - graph_definition: GraphDefinition, - chunk_size: int = 1000000, - ) -> None: - """Run inference of each field in self._module_dict[target]['']. - - Args: - database: Path to database with pulsemap and truth. - pulsemap: Name of pulsemaps. - graph_definition: GraphDefinition for Dataset - chunk_size: database will be sliced in chunks of size `chunk_size`. - Use this parameter to control memory usage. - """ - outdir = self._get_outdir(database) - if isinstance( - self._device, str - ): # Because pytorch lightning insists on breaking pytorch cuda device naming scheme - device = int(self._device[-1]) - if not os.path.isdir(outdir): - dataloaders, event_batches = self._setup_dataloaders( - graph_definition=graph_definition, - chunk_size=chunk_size, - db=database, - pulsemap=pulsemap, - selection=None, - persistent_workers=False, - ) - i = 0 - for dataloader in dataloaders: - self.info("CHUNK %s / %s" % (i, len(dataloaders))) - df = self._inference(device, dataloader) - truth = self._get_truth(database, event_batches[i].tolist()) - retro = self._get_retro(database, event_batches[i].tolist()) - self._append_to_pipeline(outdir, truth, retro, df) - i += 1 - else: - self.info(outdir) - self.info( - "WARNING - Pipeline named %s already exists! \n Please rename pipeline!" 
- % self._pipeline_name - ) - - def _setup_dataloaders( - self, - chunk_size: int, - db: str, - pulsemap: str, - graph_definition: GraphDefinition, - selection: Optional[List[int]] = None, - persistent_workers: bool = False, - ) -> Tuple[List[DataLoader], List[np.ndarray]]: - if selection is None: - selection = self._get_all_event_nos(db) - n_chunks = np.ceil(len(selection) / chunk_size) - event_batches = np.array_split(selection, n_chunks) - dataloaders = [] - for batch in event_batches: - dataloaders.append( - make_dataloader( - db=db, - graph_definition=graph_definition, - pulsemaps=pulsemap, - features=self._features, - truth=self._truth, - batch_size=self._batch_size, - shuffle=False, - selection=batch.tolist(), - num_workers=self.n_workers, - persistent_workers=persistent_workers, - ) - ) - return dataloaders, event_batches - - def _get_all_event_nos(self, db: str) -> List[int]: - with sqlite3.connect(db) as con: - query = "SELECT event_no FROM truth" - selection = pd.read_sql(query, con).values.ravel().tolist() - return selection - - def _combine_outputs(self, dataframes: List[pd.DataFrame]) -> pd.DataFrame: - return reduce(lambda x, y: pd.merge(x, y, on="event_no"), dataframes) - - def _inference( - self, device: torch.device, dataloader: DataLoader - ) -> pd.DataFrame: - dataframes = [] - for target in self._module_dict.keys(): - # dataloader = iter(dataloader) - trainer = Trainer(devices=[device], accelerator="gpu") - model = torch.load( - self._module_dict[target]["path"], - map_location="cpu", - pickle_module=dill, - ) - model.eval() - model.inference() - results = get_predictions( - trainer, - model, - dataloader, - self._module_dict[target]["output_column_names"], - additional_attributes=["event_no"], - ) - dataframes.append( - results.sort_values("event_no").reset_index(drop=True) - ) - df = self._combine_outputs(dataframes) - return df - - def _get_outdir(self, database: str) -> str: - if self._outdir is None: - database_name = database.split("/")[-3] - outdir = ( - database.split(database_name)[0] - + database_name - + "/pipelines/" - + self._pipeline_name - ) - else: - outdir = self._outdir - return outdir - - def _get_truth(self, database: str, selection: List[int]) -> pd.DataFrame: - with sqlite3.connect(database) as con: - query = "SELECT * FROM truth WHERE event_no in %s" % str( - tuple(selection) - ) - truth = pd.read_sql(query, con) - return truth - - def _get_retro(self, database: str, selection: List[int]) -> pd.DataFrame: - try: - with sqlite3.connect(database) as con: - query = "SELECT * FROM %s WHERE event_no in %s" % ( - self._retro_table_name, - str(tuple(selection)), - ) - retro = pd.read_sql(query, con) - return retro - except: # noqa: E722 - self.info("%s table does not exist" % self._retro_table_name) - - def _append_to_pipeline( - self, - outdir: str, - truth: pd.DataFrame, - retro: pd.DataFrame, - df: pd.DataFrame, - ) -> None: - os.makedirs(outdir, exist_ok=True) - pipeline_database = outdir + "/%s.db" % self._pipeline_name - create_table_and_save_to_sql(df, "reconstruction", pipeline_database) - create_table_and_save_to_sql(truth, "truth", pipeline_database) - if isinstance(retro, pd.DataFrame): - create_table_and_save_to_sql( - retro, self._retro_table_name, pipeline_database - )
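A sketch of how the pipeline above is typically invoked. The `module_dict` contents, file paths, and pulsemap name are hypothetical, but the `path` and `output_column_names` keys are exactly what `_inference` reads per target.

from graphnet.data.pipeline import InSQLitePipeline

pipeline = InSQLitePipeline(
    module_dict={
        "energy": {
            "path": "/models/energy_model.pth",      # hypothetical path
            "output_column_names": ["energy_pred"],  # hypothetical column
        },
    },
    features=["dom_x", "dom_y", "dom_z", "dom_time", "charge"],
    truth=["energy", "zenith"],
    device="cuda:0",  # string form is handled in `__call__`
)
pipeline(
    database="/data/events.db",          # hypothetical path
    pulsemap="SRTTWOfflinePulses",       # hypothetical pulsemap
    graph_definition=graph_definition,   # a `GraphDefinition` instance
)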
diff --git a/_modules/graphnet/deployment/i3modules/graphnet_module.html b/_modules/graphnet/deployment/i3modules/graphnet_module.html
deleted file mode 100644
index 6a888402d..000000000
--- a/_modules/graphnet/deployment/i3modules/graphnet_module.html
+++ /dev/null
@@ -1,817 +0,0 @@
Source code for graphnet.deployment.i3modules.graphnet_module

-"""Class(es) for deploying GraphNeT models in icetray as I3Modules."""
-from abc import abstractmethod
-from typing import TYPE_CHECKING, Any, List, Union, Dict, Tuple, Optional
-
-import dill
-import numpy as np
-import torch
-from torch_geometric.data import Data, Batch
-
-from graphnet.data.extractors import (
-    I3FeatureExtractor,
-    I3FeatureExtractorIceCubeUpgrade,
-)
-from graphnet.models import Model, StandardModel
-from graphnet.models.graphs import GraphDefinition
-from graphnet.utilities.imports import has_icecube_package
-from graphnet.utilities.config import ModelConfig
-
-if has_icecube_package() or TYPE_CHECKING:
-    from icecube.icetray import (
-        I3Module,
-        I3Frame,
-    )  # pyright: reportMissingImports=false
-    from icecube.dataclasses import (
-        I3Double,
-        I3MapKeyVectorDouble,
-    )  # pyright: reportMissingImports=false
-    from icecube import dataclasses, dataio, icetray
-
-
-
-[docs] -class GraphNeTI3Module: - """Base I3 Module for GraphNeT. - - Contains methods for extracting pulsemaps, producing graphs and writing to - frames. - """ - - def __init__( - self, - graph_definition: GraphDefinition, - pulsemap: str, - features: List[str], - pulsemap_extractor: Union[ - List[I3FeatureExtractor], I3FeatureExtractor - ], - gcd_file: str, - ): - """I3Module Constructor. - - Arguments: - graph_definition: An instance of GraphDefinition. E.g. KNNGraph. - pulsemap: the pulse map on which the module functions - features: the features that is used from the pulse map. - E.g. [dom_x, dom_y, dom_z, charge] - pulsemap_extractor: The I3FeatureExtractor used to extract the - pulsemap from the I3Frames - gcd_file: Path to the associated gcd-file. - """ - assert isinstance(graph_definition, GraphDefinition) - self._graph_definition = graph_definition - self._pulsemap = pulsemap - self._features = features - assert isinstance(gcd_file, str), "gcd_file must be string" - self._gcd_file = gcd_file - if isinstance(pulsemap_extractor, list): - self._i3_extractors = pulsemap_extractor - else: - self._i3_extractors = [pulsemap_extractor] - - for i3_extractor in self._i3_extractors: - i3_extractor.set_files(i3_file="", gcd_file=self._gcd_file) - - @abstractmethod - def __call__(self, frame: I3Frame) -> bool: - """Define here how the module acts on the frame. - - Must return True if successful. - - Return True # SUPER IMPORTANT - """ - - def _make_graph( - self, frame: I3Frame - ) -> Data: # py-l-i-n-t-:- -d-i-s-able=invalid-name - """Process Physics I3Frame into graph.""" - # Extract features - input_features = self._extract_feature_array_from_frame(frame) - # Prepare graph data - if len(input_features) > 0: - data = self._graph_definition( - input_features=input_features, - input_feature_names=self._features, - ) - return Batch.from_data_list([data]) - else: - return None - - def _extract_feature_array_from_frame(self, frame: I3Frame) -> np.array: - """Apply the I3FeatureExtractors to the I3Frame. - - Arguments: - frame: Physics I3Frame (PFrame) - - Returns: - array with pulsemap - """ - features = None - for i3extractor in self._i3_extractors: - feature_dict = i3extractor(frame) - features_pulsemap = np.array( - [feature_dict[key] for key in self._features] - ).T - if features is None: - features = features_pulsemap - else: - features = np.concatenate( - (features, features_pulsemap), axis=0 - ) - return features - - def _add_to_frame(self, frame: I3Frame, data: Dict[str, Any]) -> I3Frame: - """Add every field in data to I3Frame. - - Arguments: - frame: I3Frame (physics) - data: Dictionary containing content that will be written to frame. - - Returns: - frame: Same I3Frame as input, but with the new entries - """ - assert isinstance( - data, dict - ), f"data must be of type dict. Got {type(data)}" - for key in data.keys(): - if key not in frame: - frame.Put(key, data[key]) - return frame
- - - -
-[docs] -class I3InferenceModule(GraphNeTI3Module): - """General class for inference on i3 frames.""" - - def __init__( - self, - pulsemap: str, - features: List[str], - pulsemap_extractor: Union[ - List[I3FeatureExtractor], I3FeatureExtractor - ], - model_config: Union[ModelConfig, str], - state_dict: str, - model_name: str, - gcd_file: str, - prediction_columns: Optional[Union[List[str], str]] = None, - ): - """General class for inference on I3Frames (physics). - - Arguments: - pulsemap: the pulsmap that the model is expecting as input. - features: the features of the pulsemap that the model is expecting. - pulsemap_extractor: The extractor used to extract the pulsemap. - model_config: The ModelConfig (or path to it) that summarizes the - model used for inference. - state_dict: Path to state_dict containing the learned weights. - model_name: The name used for the model. Will help define the - named entry in the I3Frame. E.g. "dynedge". - gcd_file: path to associated gcd file. - prediction_columns: column names for the predictions of the model. - Will help define the named entry in the I3Frame. - E.g. ['energy_reco']. Optional. - """ - # Construct model & load weights - self.model = Model.from_config(model_config, trust=True) - self.model.load_state_dict(state_dict) - - super().__init__( - pulsemap=pulsemap, - features=features, - pulsemap_extractor=pulsemap_extractor, - gcd_file=gcd_file, - graph_definition=self.model._graph_definition, - ) - self.model.inference() - - self.model.to("cpu") - if prediction_columns is not None: - if isinstance(prediction_columns, str): - self.prediction_columns = [prediction_columns] - else: - self.prediction_columns = prediction_columns - else: - self.prediction_columns = self.model.prediction_labels - - self.model_name = model_name - - def __call__(self, frame: I3Frame) -> bool: - """Write predictions from model to frame.""" - # inference - graph = self._make_graph(frame) - if graph is not None: - predictions = self._inference(graph) - else: - predictions = np.repeat( - [np.nan], len(self.prediction_columns) - ).reshape(-1, len(self.prediction_columns)) - # Check dimensions of predictions and prediction columns - if len(predictions.shape) > 1: - dim = predictions.shape[1] - else: - dim = len(predictions) - assert dim == len( - self.prediction_columns - ), f"""predictions have shape {dim} but \n - prediction columns have [{self.prediction_columns}]""" - - # Build Dictionary of predictions - data = {} - assert predictions.shape[0] == 1 - for i in range(dim if isinstance(dim, int) else len(dim)): - try: - assert len(predictions[:, i]) == 1 - data[ - self.model_name + "_" + self.prediction_columns[i] - ] = I3Double(float(predictions[:, i][0])) - except IndexError: - data[ - self.model_name + "_" + self.prediction_columns[i] - ] = I3Double(predictions[0]) - - # Submission methods - frame = self._add_to_frame(frame=frame, data=data) - return True - - def _inference(self, data: Data) -> np.ndarray: - # Perform inference - task_predictions = self.model(data) - assert ( - len(task_predictions) == 1 - ), f"""This method assumes a single task. \n - Got {len(task_predictions)} tasks.""" - return self.model(data)[0].detach().numpy()
- - - -
-[docs] -class I3PulseCleanerModule(I3InferenceModule): - """A specialized module for pulse cleaning. - - It is assumed that the model provided has been trained for this. - """ - - def __init__( - self, - pulsemap: str, - features: List[str], - pulsemap_extractor: Union[ - List[I3FeatureExtractor], I3FeatureExtractor - ], - model_config: str, - state_dict: str, - model_name: str, - *, - gcd_file: str, - threshold: float = 0.7, - discard_empty_events: bool = False, - prediction_columns: Optional[Union[List[str], str]] = None, - ): - """General class for inference on I3Frames (physics). - - Arguments: - pulsemap: the pulsmap that the model is expecting as input - (the one that is being cleaned). - features: the features of the pulsemap that the model is expecting. - pulsemap_extractor: The extractor used to extract the pulsemap. - model_config: The ModelConfig (or path to it) that summarizes the - model used for inference. - state_dict: Path to state_dict containing the learned weights. - model_name: The name used for the model. Will help define the named - entry in the I3Frame. E.g. "dynedge". - gcd_file: path to associated gcd file. - threshold: the threshold for being considered a positive case. - E.g., predictions >= threshold will be considered - to be signal, all else noise. - discard_empty_events: When true, this flag will eliminate events - whose cleaned pulse series are empty. Can be used - to speed up processing especially for noise - simulation, since it will not do any writing or - further calculations. - prediction_columns: column names for the predictions of the model. - Will help define the named entry in the I3Frame. - E.g. ['energy_reco']. Optional. - """ - super().__init__( - pulsemap=pulsemap, - features=features, - pulsemap_extractor=pulsemap_extractor, - model_config=model_config, - state_dict=state_dict, - model_name=model_name, - prediction_columns=prediction_columns, - gcd_file=gcd_file, - ) - self._threshold = threshold - self._predictions_key = f"{pulsemap}_{model_name}_Predictions" - self._total_pulsemap_name = f"{pulsemap}_{model_name}_Pulses" - self._discard_empty_events = discard_empty_events - - def __call__(self, frame: I3Frame) -> bool: - """Add a cleaned pulsemap to frame.""" - # inference - gcd_file = self._gcd_file - graph = self._make_graph(frame) - if graph is None: # If there is no pulses to clean - return False - predictions = self._inference(graph) - if self._discard_empty_events: - if sum(predictions > self._threshold) == 0: - return False - - if len(predictions.shape) == 1: - predictions = predictions.reshape(-1, 1) - - assert predictions.shape[1] == 1 - - # Build Dictionary of predictions - data = {} - - predictions_map = self._construct_prediction_map( - frame=frame, predictions=predictions - ) - - # Adds the raw predictions to dictionary - if self._predictions_key not in frame.keys(): - data[self._predictions_key] = predictions_map - - # Create a pulse map mask, indicating the pulses that are over - # threshold (e.g. 
identified as signal) and therefore should be kept - # Using a lambda function to evaluate which pulses to keep by - # checking the prediction for each pulse - # (Adds the actual pulsemap to dictionary) - if self._total_pulsemap_name not in frame.keys(): - data[ - self._total_pulsemap_name - ] = dataclasses.I3RecoPulseSeriesMapMask( - frame, - self._pulsemap, - lambda om_key, index, pulse: predictions_map[om_key][index] - >= self._threshold, - ) - - # Submit predictions and general pulsemap - frame = self._add_to_frame(frame=frame, data=data) - data = {} - # Adds an additional pulsemap for each DOM type - if isinstance( - self._i3_extractors[0], I3FeatureExtractorIceCubeUpgrade - ): - mDOMMap, DEggMap, IceCubeMap = self._split_pulsemap_in_dom_types( - frame=frame, gcd_file=gcd_file - ) - - if f"{self._total_pulsemap_name}_mDOMs_Only" not in frame.keys(): - data[ - f"{self._total_pulsemap_name}_mDOMs_Only" - ] = dataclasses.I3RecoPulseSeriesMap(mDOMMap) - - if f"{self._total_pulsemap_name}_dEggs_Only" not in frame.keys(): - data[ - f"{self._total_pulsemap_name}_dEggs_Only" - ] = dataclasses.I3RecoPulseSeriesMap(DEggMap) - - if f"{self._total_pulsemap_name}_pDOMs_Only" not in frame.keys(): - data[ - f"{self._total_pulsemap_name}_pDOMs_Only" - ] = dataclasses.I3RecoPulseSeriesMap(IceCubeMap) - - # Submits the additional pulsemaps to the frame - frame = self._add_to_frame(frame=frame, data=data) - - return True - - def _split_pulsemap_in_dom_types( - self, frame: I3Frame, gcd_file: Any - ) -> Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]: - """Will split the cleaned pulsemap into multiple pulsemaps. - - Arguments: - frame: I3Frame (physics) - gcd_file: path to associated gcd file - - Returns: - mDOMMap, DeGGMap, IceCubeMap - """ - g = dataio.I3File(gcd_file) - gFrame = g.pop_frame() - while "I3Geometry" not in gFrame.keys(): - gFrame = g.pop_frame() - omGeoMap = gFrame["I3Geometry"].omgeo - - mDOMMap, DEggMap, IceCubeMap = {}, {}, {} - pulses = dataclasses.I3RecoPulseSeriesMap.from_frame( - frame, self._total_pulsemap_name - ) - for P in pulses: - om = omGeoMap[P[0]] - if om.omtype == 130: # "mDOM" - mDOMMap[P[0]] = P[1] - elif om.omtype == 120: # "DEgg" - DEggMap[P[0]] = P[1] - elif om.omtype == 20: # "IceCube / pDOM" - IceCubeMap[P[0]] = P[1] - return mDOMMap, DEggMap, IceCubeMap - - def _construct_prediction_map( - self, frame: I3Frame, predictions: np.ndarray - ) -> I3MapKeyVectorDouble: - """Make a pulsemap from predictions (for all OM types). - - Arguments: - frame: I3Frame (physics) - predictions: predictions from Model. - - Returns: - predictions_map: a pulsemap from predictions - """ - pulsemap = dataclasses.I3RecoPulseSeriesMap.from_frame( - frame, self._pulsemap - ) - - idx = 0 - predictions = predictions.squeeze(1) - predictions_map = dataclasses.I3MapKeyVectorDouble() - for om_key, pulses in pulsemap.items(): - num_pulses = len(pulses) - predictions_map[om_key] = predictions[ - idx : idx + num_pulses - ].tolist() - idx += num_pulses - - # Checks - assert idx == len( - predictions - ), """Not all predictions were mapped to pulses,\n - validation of predictions have failed.""" - - assert ( - pulsemap.keys() == predictions_map.keys() - ), """Input pulse map and predictions map do \n - not contain exactly the same OMs""" - return predictions_map
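The frame keys written by the cleaner follow directly from the f-strings in the constructor above. For concreteness, a hypothetical example assuming `pulsemap="SplitInIcePulses"` and `model_name="cleaning_model"`:

    # Keys added to the frame by I3PulseCleanerModule in this hypothetical setup:
    #
    #   "SplitInIcePulses_cleaning_model_Predictions"  # per-pulse scores (I3MapKeyVectorDouble)
    #   "SplitInIcePulses_cleaning_model_Pulses"       # mask of pulses with score >= threshold
    #
    # For IceCube-Upgrade extractors, "_mDOMs_Only", "_dEggs_Only" and
    # "_pDOMs_Only" variants of the cleaned pulsemap are added as well.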
\ No newline at end of file
diff --git a/_modules/graphnet/models/coarsening.html b/_modules/graphnet/models/coarsening.html
deleted file mode 100644
index 301d35d73..000000000
--- a/_modules/graphnet/models/coarsening.html
+++ /dev/null
@@ -1,708 +0,0 @@

Source code for graphnet.models.coarsening

-"""Class(es) for coarsening operations (i.e., clustering, or local pooling)."""
-
-from abc import abstractmethod
-from typing import List, Optional, Union
-from copy import deepcopy
-import torch
-from torch import LongTensor, Tensor
-from torch_geometric.data import Data, Batch
-from sklearn.cluster import DBSCAN
-
-# from torch_geometric.utils import unbatch_edge_index
-from graphnet.models.components.pool import (
-    group_by,
-    avg_pool,
-    max_pool,
-    min_pool,
-    sum_pool,
-    avg_pool_x,
-    max_pool_x,
-    min_pool_x,
-    sum_pool_x,
-    std_pool_x,
-)
-from graphnet.models import Model
-
-# Utility method(s)
-from torch_geometric.utils import degree
-
-# NOTE: From [https://github.com/pyg-team/pytorch_geometric/pull/4903]
-# TODO:  Remove once bumping to torch_geometric>=2.1.0
-#       See [https://github.com/pyg-team/pytorch_geometric/blob/master/CHANGELOG.md]
-
-
-
-[docs] -def unbatch_edge_index(edge_index: Tensor, batch: Tensor) -> List[Tensor]: - # noqa: D401 - r"""Splits the :obj:`edge_index` according to a :obj:`batch` vector. - - Args: - edge_index (Tensor): The edge_index tensor. Must be ordered. - batch (LongTensor): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. Must be ordered. - :rtype: :class:`List[Tensor]` - """ - deg = degree(batch, dtype=torch.int64) - ptr = torch.cat([deg.new_zeros(1), deg.cumsum(dim=0)[:-1]], dim=0) - - edge_batch = batch[edge_index[0]] - edge_index = edge_index - ptr[edge_batch] - sizes = degree(edge_batch, dtype=torch.int64).cpu().tolist() - return edge_index.split(sizes, dim=1)
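For concreteness, a small worked example with made-up tensors:

    import torch

    # Two graphs batched together: nodes 0-2 belong to graph 0, nodes 3-4 to graph 1.
    edge_index = torch.tensor([[0, 1, 3], [1, 2, 4]])
    batch = torch.tensor([0, 0, 0, 1, 1])

    # unbatch_edge_index(edge_index, batch) re-indexes edges per graph:
    #   [tensor([[0, 1], [1, 2]]), tensor([[0], [1]])]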
-
-
-[docs] -class Coarsening(Model): - """Base class for coarsening operations.""" - - # Class variables - reduce_options = { - "avg": (avg_pool, avg_pool_x), - "min": (min_pool, min_pool_x), - "max": (max_pool, max_pool_x), - "sum": (sum_pool, sum_pool_x), - } - - def __init__( - self, - reduce: str = "avg", - transfer_attributes: bool = True, - ): - """Construct `Coarsening`.""" - assert reduce in self.reduce_options - - ( - self._reduce_method, - self._attribute_reduce_method, - ) = self.reduce_options[reduce] - self._do_transfer_attributes = transfer_attributes - - # Base class constructor - super().__init__() - - @abstractmethod - def _perform_clustering(self, data: Union[Data, Batch]) -> LongTensor: - """Cluster nodes in `data` by assigning a cluster index to each.""" - - def _additional_features(self, cluster: LongTensor, data: Batch) -> Tensor: - """Perform additional poolings of feature tensor `x` on `data`. - - By default the nominal `pooling_method` is used for features as well. - This method can be overwritten for bespoke coarsening operations. - """ - - def _transfer_attributes( - self, cluster: LongTensor, original_data: Batch, pooled_data: Batch - ) -> Batch: - """Transfer attributes on `original_data` to `pooled_data`.""" - # Check(s) - if not self._do_transfer_attributes: - return pooled_data - - attributes = list(original_data._store.keys()) - batch: Optional[LongTensor] = original_data.batch - for ix, attr in enumerate(attributes): - if attr not in pooled_data._store: - values: Tensor = getattr(original_data, attr) - - attr_is_node_level_tensor = False - if isinstance(values, Tensor): - if batch is None: - attr_is_node_level_tensor = ( - values.dim() > 1 or values.size(dim=0) > 1 - ) - else: - attr_is_node_level_tensor = ( - values.size() == original_data.batch.size() - ) - - if attr_is_node_level_tensor: - values = self._attribute_reduce_method( - cluster, - values, - batch=torch.zeros_like(values, dtype=torch.int32), - )[0] - - setattr(pooled_data, attr, values) - - return pooled_data - -
-[docs] - def forward(self, data: Union[Data, Batch]) -> Union[Data, Batch]: - """Perform coarsening operation.""" - # Get tensor of cluster indices for each node. - cluster: LongTensor = self._perform_clustering(data) - - # Check whether a graph has already been built. Otherwise, set a dummy - # connectivity, as this is required by pooling functions. - edge_index = data.edge_index - if edge_index is None: - data.edge_index = torch.tensor([[]], dtype=torch.int64) - - # Pool `data` object, including `x`, `batch`. and `edge_index`. - pooled_data: Batch = self._reduce_method(cluster, data) - - # Optionally overwrite feature tensor - x = self._additional_features(cluster, data) - if x is not None: - pooled_data.x = torch.cat( - ( - pooled_data.x, - x, - ), - dim=1, - ) - - # Reset `edge_index` if necessary. - if edge_index is None: - data.edge_index = edge_index - pooled_data.edge_index = edge_index - - # Transfer attributes on `data`, pooling as required. - pooled_data = self._transfer_attributes(cluster, data, pooled_data) - - # Reconstruct Batch Attributes - if isinstance(data, Batch): # if a Batch object - pooled_data = self._reconstruct_batch(data, pooled_data) - return pooled_data
- - - def _reconstruct_batch(self, original: Data, pooled: Data) -> Data: - pooled = self._add_slice_dict(original, pooled) - pooled = self._add_inc_dict(original, pooled) - return pooled - - def _add_slice_dict(self, original: Data, pooled: Data) -> Data: - # Copy original slice_dict and count nodes in each graph in pooled batch - slice_dict = deepcopy(original._slice_dict) - _, counts = torch.unique_consecutive(pooled.batch, return_counts=True) - # Reconstruct the entry in slice_dict for pulsemaps - only these are affected by pooling - pulsemap_slice = [0] - for i in range(len(counts)): - pulsemap_slice.append(pulsemap_slice[i] + counts[i].item()) - - # Identifies pulsemap entries in slice_dict and set them to pulsemap_slice - for field in slice_dict.keys(): - if (original._num_graphs) == slice_dict[field][-1]: - pass # not pulsemap, so skip - else: - slice_dict[field] = pulsemap_slice - pooled._slice_dict = slice_dict - return pooled - - def _add_inc_dict(self, original: Data, pooled: Data) -> Data: - # not changed by coarsening - pooled._inc_dict = deepcopy(original._inc_dict) - return pooled
-
-
-
-[docs]
-class AttributeCoarsening(Coarsening):
-    """Coarsen pulses based on specified attributes."""
-
-    def __init__(
-        self,
-        attributes: List[str],
-        reduce: str = "avg",
-        transfer_attributes: bool = True,
-    ):
-        """Construct `AttributeCoarsening`."""
-        self._attributes = attributes
-
-        # Base class constructor
-        super().__init__(reduce, transfer_attributes)
-
-    def _perform_clustering(self, data: Union[Data, Batch]) -> LongTensor:
-        """Cluster nodes in `data` by assigning a cluster index to each."""
-        dom_index = group_by(data, self._attributes)
-        return dom_index
-
-
-[docs] -class DOMCoarsening(Coarsening): - """Coarsen pulses to DOM-level.""" - - def __init__( - self, - reduce: str = "avg", - transfer_attributes: bool = True, - keys: Optional[List[str]] = None, - ): - """Cluster pulses on the same DOM.""" - super().__init__(reduce, transfer_attributes) - if keys is None: - self._keys = [ - "dom_x", - "dom_y", - "dom_z", - "rde", - "pmt_area", - ] - else: - self._keys = keys - - def _perform_clustering(self, data: Union[Data, Batch]) -> LongTensor: - """Cluster nodes in `data` by assigning a cluster index to each.""" - dom_index = group_by(data, self._keys) - return dom_index
-
-
-[docs] -class CustomDOMCoarsening(DOMCoarsening): - """Coarsen pulses to DOM-level with additional attributes.""" - - def _additional_features(self, cluster: LongTensor, data: Data) -> Tensor: - """Perform Additional poolings of feature tensor `x` on `data`.""" - batch = data.batch - - features = data.features - if batch is not None: - features = [feats[0] for feats in features] - - ix_time = features.index("dom_time") - ix_charge = features.index("charge") - - time = data.x[:, ix_time] - charge = data.x[:, ix_charge] - - x = torch.stack( - ( - min_pool_x(cluster, time, batch)[0], - max_pool_x(cluster, time, batch)[0], - std_pool_x(cluster, time, batch)[0], - min_pool_x(cluster, charge, batch)[0], - max_pool_x(cluster, charge, batch)[0], - std_pool_x(cluster, charge, batch)[0], - sum_pool_x(cluster, torch.ones_like(charge), batch)[ - 0 - ], # Num. nodes (pulses) per cluster (DOM) - ), - dim=1, - ) - - return x
-
-
-[docs] -class DOMAndTimeWindowCoarsening(Coarsening): - """Coarsen pulses to DOM-level, with additional time-window clustering.""" - - def __init__( - self, - time_window: float, - reduce: str = "avg", - transfer_attributes: bool = True, - keys: List[str] = [ - "dom_x", - "dom_y", - "dom_z", - "rde", - "pmt_area", - ], - time_key: str = "dom_time", - ): - """Cluster pulses on the same DOM within `time_window`.""" - super().__init__(reduce, transfer_attributes) - self._time_window = time_window - self._cluster_method = DBSCAN(self._time_window, min_samples=1) - self._keys = keys - self._time_key = time_key - - def _perform_clustering(self, data: Union[Data, Batch]) -> LongTensor: - """Cluster nodes in `data` by assigning a cluster index to each.""" - dom_index = group_by(data, self._keys) - if data.batch is not None: - features = data.features[0] - else: - features = data.features - - ix_time = features.index(self._time_key) - hit_times = data.x[:, ix_time] - - # Scale up dom_index to make sure clusters are well separated - times_and_domids = torch.stack( - [ - hit_times, - dom_index * self._time_window * 10, - ] - ).T - clusters = torch.tensor( - self._cluster_method.fit_predict(times_and_domids.cpu()), - device=hit_times.device, - ) - - return clusters
\ No newline at end of file
diff --git a/_modules/graphnet/models/components/layers.html b/_modules/graphnet/models/components/layers.html
deleted file mode 100644
index 80fbc5ec0..000000000
--- a/_modules/graphnet/models/components/layers.html
+++ /dev/null
@@ -1,579 +0,0 @@

Source code for graphnet.models.components.layers

-"""Class(es) implementing layers to be used in `graphnet` models."""
-
-from typing import Any, Callable, Optional, Sequence, Union, List, Tuple
-
-import torch
-from torch.functional import Tensor
-from torch_geometric.nn import EdgeConv
-from torch_geometric.nn.pool import knn_graph
-from torch_geometric.typing import Adj, PairTensor
-from torch_geometric.nn.conv import MessagePassing
-from torch_geometric.nn.inits import reset
-from torch.nn.modules import TransformerEncoder, TransformerEncoderLayer
-from torch.nn.modules.normalization import LayerNorm
-from torch_geometric.utils import to_dense_batch
-from pytorch_lightning import LightningModule
-
-
-
-[docs] -class DynEdgeConv(EdgeConv, LightningModule): - """Dynamical edge convolution layer.""" - - def __init__( - self, - nn: Callable, - aggr: str = "max", - nb_neighbors: int = 8, - features_subset: Optional[Union[Sequence[int], slice]] = None, - **kwargs: Any, - ): - """Construct `DynEdgeConv`. - - Args: - nn: The MLP/torch.Module to be used within the `EdgeConv`. - aggr: Aggregation method to be used with `EdgeConv`. - nb_neighbors: Number of neighbours to be clustered after the - `EdgeConv` operation. - features_subset: Subset of features in `Data.x` that should be used - when dynamically performing the new graph clustering after the - `EdgeConv` operation. Defaults to all features. - **kwargs: Additional features to be passed to `EdgeConv`. - """ - # Check(s) - if features_subset is None: - features_subset = slice(None) # Use all features - assert isinstance(features_subset, (list, slice)) - - # Base class constructor - super().__init__(nn=nn, aggr=aggr, **kwargs) - - # Additional member variables - self.nb_neighbors = nb_neighbors - self.features_subset = features_subset - -
-
-[docs]
-    def forward(
-        self, x: Tensor, edge_index: Adj, batch: Optional[Tensor] = None
-    ) -> Tuple[Tensor, Adj]:
-        """Forward pass."""
-        # Standard EdgeConv forward pass
-        x = super().forward(x, edge_index)
-
-        # Recompute adjacency
-        edge_index = knn_graph(
-            x=x[:, self.features_subset],
-            k=self.nb_neighbors,
-            batch=batch,
-        ).to(self.device)
-
-        return x, edge_index
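A self-contained sketch of the layer's contract, with arbitrary sizes; note that `forward` returns both the convolved features and a freshly recomputed k-NN `edge_index`:

    import torch

    # EdgeConv concatenates [x_i, x_j - x_i], so the MLP input size is 2 * nb_features.
    conv = DynEdgeConv(
        nn=torch.nn.Sequential(torch.nn.Linear(2 * 4, 8), torch.nn.LeakyReLU()),
        aggr="add",
        nb_neighbors=3,
    )
    x = torch.randn(10, 4)
    edge_index = torch.randint(0, 10, (2, 20))
    x, edge_index = conv(x, edge_index)  # x: [10, 8]; edge_index: new 3-NN graph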
-
-
-
-[docs]
-class EdgeConvTito(MessagePassing, LightningModule):
-    """Implementation of the EdgeConvTito layer used in the TITO solution.
-
-    Used in the 'IceCube - Neutrinos in Deep Ice' Kaggle competition.
-    """
-
-    def __init__(
-        self,
-        nn: Callable,
-        aggr: str = "max",
-        **kwargs: Any,
-    ):
-        """Construct `EdgeConvTito`.
-
-        Args:
-            nn: The MLP/torch.Module to be used within the `EdgeConvTito`.
-            aggr: Aggregation method to be used with `EdgeConvTito`.
-            **kwargs: Additional features to be passed to `EdgeConvTito`.
-        """
-        super().__init__(aggr=aggr, **kwargs)
-        self.nn = nn
-        self.reset_parameters()
-[docs] - def reset_parameters(self) -> None: - """Reset all learnable parameters of the module.""" - reset(self.nn)
-
-[docs] - def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor: - """Forward pass.""" - if isinstance(x, Tensor): - x = (x, x) - # propagate_type: (x: PairTensor) - return self.propagate(edge_index, x=x, size=None)
-
-[docs] - def message(self, x_i: Tensor, x_j: Tensor) -> Tensor: - """Edgeconvtito message passing.""" - return self.nn( - torch.cat([x_i, x_j - x_i, x_j], dim=-1) - ) # EdgeConvTito
- - - def __repr__(self) -> str: - """Print out module name.""" - return f"{self.__class__.__name__}(nn={self.nn})"
-
-
-
-[docs]
-class DynTrans(EdgeConvTito, LightningModule):
-    """Implementation of the dynTrans1 layer used in the TITO solution.
-
-    Used in the 'IceCube - Neutrinos in Deep Ice' Kaggle competition.
-    """
-
-    def __init__(
-        self,
-        layer_sizes: Optional[List[int]] = None,
-        aggr: str = "max",
-        features_subset: Optional[Union[Sequence[int], slice]] = None,
-        n_head: int = 8,
-        **kwargs: Any,
-    ):
-        """Construct `DynTrans`.
-
-        Args:
-            layer_sizes: List of layer sizes to be used in `DynTrans`.
-            aggr: Aggregation method to be used with `DynTrans`.
-            features_subset: Subset of features in `Data.x` that should be used
-                when dynamically performing the new graph clustering after the
-                `EdgeConv` operation. Defaults to all features.
-            n_head: Number of heads to be used in the multiheadattention
-                models.
-            **kwargs: Additional features to be passed to `DynTrans`.
-        """
-        # Check(s)
-        if features_subset is None:
-            features_subset = slice(None)  # Use all features
-        assert isinstance(features_subset, (list, slice))
-
-        if layer_sizes is None:
-            layer_sizes = [256, 256, 256]
-        layers = []
-        for ix, (nb_in, nb_out) in enumerate(
-            zip(layer_sizes[:-1], layer_sizes[1:])
-        ):
-            if ix == 0:
-                nb_in *= 3  # edgeConv1
-            layers.append(torch.nn.Linear(nb_in, nb_out))
-            layers.append(torch.nn.LeakyReLU())
-        d_model = nb_out
-
-        # Base class constructor
-        super().__init__(nn=torch.nn.Sequential(*layers), aggr=aggr, **kwargs)
-
-        # Additional member variables
-        self.features_subset = features_subset
-
-        self.norm1 = LayerNorm(d_model, eps=1e-5)  # lNorm
-
-        # Transformer layer(s)
-        encoder_layer = TransformerEncoderLayer(
-            d_model=d_model,
-            nhead=n_head,
-            batch_first=True,
-            norm_first=False,
-        )
-        self._transformer_encoder = TransformerEncoder(
-            encoder_layer, num_layers=1
-        )
-[docs] - def forward( - self, x: Tensor, edge_index: Adj, batch: Optional[Tensor] = None - ) -> Tensor: - """Forward pass.""" - x_out = super().forward(x, edge_index) - - if x_out.shape[-1] == x.shape[-1]: - x = x + x_out - else: - x = x_out - - x = self.norm1(x) # lNorm - - # Transformer layer - x, mask = to_dense_batch(x, batch) - x = self._transformer_encoder(x, src_key_padding_mask=~mask) - x = x[mask] - - return x
\ No newline at end of file
diff --git a/_modules/graphnet/models/components/pool.html b/_modules/graphnet/models/components/pool.html
deleted file mode 100644
index 11ce78f3d..000000000
--- a/_modules/graphnet/models/components/pool.html
+++ /dev/null
@@ -1,656 +0,0 @@

Source code for graphnet.models.components.pool

-"""Functions for performing pooling/clustering/coarsening."""
-
-from typing import Any, Callable, List, Optional, Union
-
-import torch
-from torch import LongTensor, Tensor
-from torch_geometric.data import Data, Batch
-from torch_geometric.nn.pool.consecutive import consecutive_cluster
-from torch_geometric.nn.pool.pool import pool_edge, pool_batch, pool_pos
-from torch_scatter import scatter, scatter_std
-
-from torch_geometric.nn.pool import (
-    avg_pool,
-    max_pool,
-    avg_pool_x,
-    max_pool_x,
-)
-
-
-
-
-[docs]
-def min_pool(
-    cluster: LongTensor, data: Data, transform: Optional[Any] = None
-) -> Data:
-    """Perform min-pooling of `Data`.
-
-    Like `max_pool`, just negating `data.x`.
-    """
-    data.x = -data.x
-    data_pooled = max_pool(
-        cluster,
-        data,
-        transform,
-    )
-    data.x = -data.x
-    data_pooled.x = -data_pooled.x
-    return data_pooled
-
-
-
-[docs]
-def min_pool_x(
-    cluster: LongTensor,
-    x: Tensor,
-    batch: LongTensor,
-    size: Optional[int] = None,
-) -> Tensor:
-    """Perform min-pooling of `Tensor`.
-
-    Like `max_pool_x`, just negating `x`.
-    """
-    ret = max_pool_x(cluster, -x, batch, size)
-    if size is None:
-        return (-ret[0], ret[1])
-    else:
-        return -ret
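A tiny numeric sketch of the negation trick, using made-up tensors:

    import torch

    x = torch.tensor([[1.0], [5.0], [3.0]])
    cluster = torch.tensor([0, 0, 1])
    batch = torch.tensor([0, 0, 0])

    # max-pooling of -x, negated back, yields per-cluster minima:
    # min_pool_x(cluster, x, batch)[0] -> tensor([[1.], [3.]])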
-
-
-[docs] -def sum_pool_and_distribute( - tensor: Tensor, - cluster_index: LongTensor, - batch: Optional[LongTensor] = None, -) -> Tensor: - """Sum-pool values and distribute result to the individual nodes.""" - if batch is None: - batch = torch.zeros(tensor.size(dim=0)).long() - tensor_pooled, _ = sum_pool_x(cluster_index, tensor, batch) - inv, _ = consecutive_cluster(cluster_index) - tensor_unpooled = tensor_pooled[inv] - return tensor_unpooled
- - - -def _group_identical( - tensor: Tensor, batch: Optional[LongTensor] = None -) -> LongTensor: - """Group rows in `tensor` that are identical. - - Args: - tensor: Tensor of shape [N, F]. - batch: Batch indices, to only group identical rows within batches. - - Returns: - List of group indices, from 0 to num. groups - 1, assigning all - identical rows to the same group. - """ - if batch is not None: - tensor = torch.cat((batch.unsqueeze(dim=1), tensor), dim=1) - return torch.unique(tensor, return_inverse=True, sorted=False, dim=0)[1] - - -
-
-[docs]
-def group_by(data: Union[Data, Batch], keys: List[str]) -> LongTensor:
-    """Group nodes in `data` that have identical values of `keys`.
-
-    This grouping is done within each event in case of batching. This allows
-    for, e.g., assigning the same index to all pulses on the same PMT or DOM
-    in the same event. This can be used for coarsening graphs, e.g., from
-    pulse-level to DOM-level, by aggregating features across each group
-    returned by this method.
-
-    Example:
-        Given:
-            data.f1 = [1,1,2,2,2]
-            data.f2 = [6,7,7,7,8]
-        Calls:
-            group_by(data, ['f1'])       -> [0, 0, 1, 1, 1]
-            group_by(data, ['f2'])       -> [0, 1, 1, 1, 2]
-            group_by(data, ['f1', 'f2']) -> [0, 1, 2, 2, 3]
-    """
-    features = [getattr(data, key) for key in keys]
-    tensor = torch.stack(features).T  # .int() @TODO: Required? Use rounding?
-    batch = getattr(data, "batch", None)
-    index = _group_identical(tensor, batch)
-    return index
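The docstring example as a runnable sketch (toy tensors):

    import torch
    from torch_geometric.data import Data

    data = Data(x=torch.zeros(5, 1))
    data.f1 = torch.tensor([1, 1, 2, 2, 2])
    data.f2 = torch.tensor([6, 7, 7, 7, 8])

    print(group_by(data, ["f1"]))        # tensor([0, 0, 1, 1, 1])
    print(group_by(data, ["f1", "f2"]))  # tensor([0, 1, 2, 2, 3])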
-
-
-[docs] -def group_pulses_to_dom(data: Data) -> Data: - """Group pulses on the same DOM, using DOM and string number.""" - data.dom_index = group_by(data, ["dom_number", "string"]) - return data
-
-
-[docs] -def group_pulses_to_pmt(data: Data) -> Data: - """Group pulses on the same PMT, using PMT, DOM, and string number.""" - data.pmt_index = group_by(data, ["pmt_number", "dom_number", "string"]) - return data
- - - -# Below mirroring `torch_geometric.nn.pool.{avg,max}_pool.py`. -def _sum_pool_x( - cluster: LongTensor, x: Tensor, size: Optional[int] = None -) -> Tensor: - return scatter(x, cluster, dim=0, dim_size=size, reduce="sum") - - -def _std_pool_x( - cluster: LongTensor, x: Tensor, size: Optional[int] = None -) -> Tensor: - return scatter_std(x, cluster, dim=0, dim_size=size, unbiased=False) - - -
-[docs] -def sum_pool_x( - cluster: LongTensor, - x: Tensor, - batch: LongTensor, - size: Optional[int] = None, -) -> Tensor: - r"""Sum-pool node features according to the clustering defined in `cluster`. - - Args: - cluster: Cluster vector :math:`\mathbf{c} \in \{ 0, - \ldots, N - 1 \}^N`, which assigns each node to a specific cluster. - x: Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}`. - batch: Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, - B-1\}}^N`, which assigns each node to a specific example. - size: The maximum number of clusters in a single - example. This property is useful to obtain a batch-wise dense - representation, *e.g.* for applying FC layers, but should only be - used if the size of the maximum number of clusters per example is - known in advance. - """ - if size is not None: - batch_size = int(batch.max().item()) + 1 - return _sum_pool_x(cluster, x, batch_size * size), None - - cluster, perm = consecutive_cluster(cluster) - x = _sum_pool_x(cluster, x) - batch = pool_batch(perm, batch) - - return x, batch
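A minimal numeric sketch with made-up tensors:

    import torch

    x = torch.tensor([[1.0], [2.0], [3.0]])
    cluster = torch.tensor([0, 0, 1])
    batch = torch.tensor([0, 0, 0])

    x_pooled, batch_pooled = sum_pool_x(cluster, x, batch)
    # x_pooled: tensor([[3.], [3.]]); batch_pooled: tensor([0, 0])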
-
-
-[docs] -def std_pool_x( - cluster: LongTensor, - x: Tensor, - batch: LongTensor, - size: Optional[int] = None, -) -> Tensor: - r"""Std-pool node features according to the clustering defined in `cluster`. - - Args: - cluster: Cluster vector :math:`\mathbf{c} \in \{ 0, - \ldots, N - 1 \}^N`, which assigns each node to a specific cluster. - x: Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}`. - batch: Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, - B-1\}}^N`, which assigns each node to a specific example. - size: The maximum number of clusters in a single - example. This property is useful to obtain a batch-wise dense - representation, *e.g.* for applying FC layers, but should only be - used if the size of the maximum number of clusters per example is - known in advance. - """ - if size is not None: - batch_size = int(batch.max().item()) + 1 - return _std_pool_x(cluster, x, batch_size * size), None - - cluster, perm = consecutive_cluster(cluster) - x = _std_pool_x(cluster, x) - batch = pool_batch(perm, batch) - - return x, batch
-
-
-[docs] -def sum_pool( - cluster: LongTensor, data: Data, transform: Optional[Callable] = None -) -> Data: - r"""Pool and coarsen graph according to the clustering defined in `cluster`. - - All nodes within the same cluster will be represented as one node. - Final node features are defined by the *sum* of features of all nodes - within the same cluster, node positions are averaged and edge indices are - defined to be the union of the edge indices of all nodes within the same - cluster. - - Args: - cluster: Cluster vector :math:`\mathbf{c} \in \{ 0, - \ldots, N - 1 \}^N`, which assigns each node to a specific cluster. - data: Graph data object. - transform: A function/transform that takes in the - coarsened and pooled :obj:`torch_geometric.data.Data` object and - returns a transformed version. - """ - cluster, perm = consecutive_cluster(cluster) - - x = None if data.x is None else _sum_pool_x(cluster, data.x) - index, attr = pool_edge(cluster, data.edge_index, data.edge_attr) - batch = None if data.batch is None else pool_batch(perm, data.batch) - pos = None if data.pos is None else pool_pos(cluster, data.pos) - - data = Batch(batch=batch, x=x, edge_index=index, edge_attr=attr, pos=pos) - - if transform is not None: - data = transform(data) - - return data
-
-
-[docs] -def std_pool( - cluster: LongTensor, data: Data, transform: Optional[Callable] = None -) -> Data: - r"""Pool and coarsen graph according to the clustering defined in `cluster`. - - All nodes within the same cluster will be represented as one node. - Final node features are defined by the *std* of features of all nodes - within the same cluster, node positions are averaged and edge indices are - defined to be the union of the edge indices of all nodes within the same - cluster. - - Args: - cluster: Cluster vector :math:`\mathbf{c} \in \{ 0, - \ldots, N - 1 \}^N`, which assigns each node to a specific cluster. - data: Graph data object. - transform: A function/transform that takes in the - coarsened and pooled :obj:`torch_geometric.data.Data` object and - returns a transformed version. - """ - cluster, perm = consecutive_cluster(cluster) - - x = None if data.x is None else _std_pool_x(cluster, data.x) - index, attr = pool_edge(cluster, data.edge_index, data.edge_attr) - batch = None if data.batch is None else pool_batch(perm, data.batch) - pos = None if data.pos is None else pool_pos(cluster, data.pos) - - data = Batch(batch=batch, x=x, edge_index=index, edge_attr=attr, pos=pos) - - if transform is not None: - data = transform(data) - - return data
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/convnet.html b/_modules/graphnet/models/gnn/convnet.html
deleted file mode 100644
index 9c1b3f2f7..000000000
--- a/_modules/graphnet/models/gnn/convnet.html
+++ /dev/null
@@ -1,484 +0,0 @@

Source code for graphnet.models.gnn.convnet

-"""Implementation of the ConvNet GNN model architecture.
-
-Author: Martin Ha Minh
-"""
-
-import torch
-from torch import Tensor
-from torch.nn import BatchNorm1d, Linear, Dropout
-import torch.nn.functional as F
-from torch_geometric.nn import TAGConv, global_add_pool, global_max_pool
-from torch_geometric.data import Data
-
-from graphnet.models.gnn.gnn import GNN
-
-
-
-[docs] -class ConvNet(GNN): - """ConvNet (convolutional network) model.""" - - def __init__( - self, - nb_inputs: int, - nb_outputs: int, - nb_intermediate: int = 128, - dropout_ratio: float = 0.3, - ): - """Construct `ConvNet`. - - Args: - nb_inputs: Number of input features, i.e. dimension of input - layer. - nb_outputs: Number of prediction labels, i.e. dimension of - output layer. - nb_intermediate: Number of nodes in intermediate layer(s). - dropout_ratio: Fraction of nodes to drop. - """ - # Base class constructor - super().__init__(nb_inputs, nb_outputs) - - # Member variables - self.nb_intermediate = nb_intermediate - self.nb_intermediate2 = 6 * self.nb_intermediate - - # Architecture configuration - self.conv1 = TAGConv(self.nb_inputs, self.nb_intermediate, 2) - self.conv2 = TAGConv(self.nb_intermediate, self.nb_intermediate, 2) - self.conv3 = TAGConv(self.nb_intermediate, self.nb_intermediate, 2) - - self.batchnorm1 = BatchNorm1d(self.nb_intermediate2) - - self.linear1 = Linear(self.nb_intermediate2, self.nb_intermediate2) - self.linear2 = Linear(self.nb_intermediate2, self.nb_intermediate2) - self.linear3 = Linear(self.nb_intermediate2, self.nb_intermediate2) - self.linear4 = Linear(self.nb_intermediate2, self.nb_intermediate2) - self.linear5 = Linear(self.nb_intermediate2, self.nb_intermediate2) - - self.drop1 = Dropout(dropout_ratio) - self.drop2 = Dropout(dropout_ratio) - self.drop3 = Dropout(dropout_ratio) - self.drop4 = Dropout(dropout_ratio) - self.drop5 = Dropout(dropout_ratio) - - self.out = Linear(self.nb_intermediate2, self.nb_outputs) - -
-[docs] - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass.""" - # Convenience variables - x, edge_index, batch = data.x, data.edge_index, data.batch - - # Graph convolutional operations - x = F.leaky_relu(self.conv1(x, edge_index)) - x1 = torch.cat( - [ - global_add_pool(x, batch), - global_max_pool(x, batch), - ], - dim=1, - ) - - x = F.leaky_relu(self.conv2(x, edge_index)) - x2 = torch.cat( - [ - global_add_pool(x, batch), - global_max_pool(x, batch), - ], - dim=1, - ) - - x = F.leaky_relu(self.conv3(x, edge_index)) - x3 = torch.cat( - [ - global_add_pool(x, batch), - global_max_pool(x, batch), - ], - dim=1, - ) - - # Skip-cat - x = torch.cat([x1, x2, x3], dim=1) - - # Batch-normalising intermediate features - x = self.batchnorm1(x) - - # Post-processing - x = F.leaky_relu(self.linear1(x)) - x = self.drop1(x) - x = F.leaky_relu(self.linear2(x)) - x = self.drop2(x) - x = F.leaky_relu(self.linear3(x)) - x = self.drop3(x) - x = F.leaky_relu(self.linear4(x)) - x = self.drop4(x) - x = F.leaky_relu(self.linear5(x)) - x = self.drop5(x) - - # Read-out - x = self.out(x) - - return x
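A minimal shape check with toy tensors. Two events are used because `BatchNorm1d` needs more than one sample per channel in training mode; the random edges are physically meaningless:

    import torch
    from torch_geometric.data import Data

    model = ConvNet(nb_inputs=4, nb_outputs=3)
    data = Data(
        x=torch.randn(12, 4),
        edge_index=torch.randint(0, 12, (2, 40)),
        batch=torch.cat([torch.zeros(6, dtype=torch.long),
                         torch.ones(6, dtype=torch.long)]),
    )
    out = model(data)  # shape: [2, 3], one row per event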
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/dynedge.html b/_modules/graphnet/models/gnn/dynedge.html
deleted file mode 100644
index 90dbb8077..000000000
--- a/_modules/graphnet/models/gnn/dynedge.html
+++ /dev/null
@@ -1,691 +0,0 @@

Source code for graphnet.models.gnn.dynedge

-"""Implementation of the DynEdge GNN model architecture."""
-from typing import List, Optional, Sequence, Tuple, Union
-
-import torch
-from torch import Tensor, LongTensor
-from torch_geometric.data import Data
-from torch_scatter import scatter_max, scatter_mean, scatter_min, scatter_sum
-
-from graphnet.models.components.layers import DynEdgeConv
-from graphnet.models.gnn.gnn import GNN
-from graphnet.models.utils import calculate_xyzt_homophily
-
-GLOBAL_POOLINGS = {
-    "min": scatter_min,
-    "max": scatter_max,
-    "sum": scatter_sum,
-    "mean": scatter_mean,
-}
-
-
-
-
-[docs]
-class DynEdge(GNN):
-    """DynEdge (dynamical edge convolutional) model."""
-
-    def __init__(
-        self,
-        nb_inputs: int,
-        *,
-        nb_neighbours: int = 8,
-        features_subset: Optional[Union[List[int], slice]] = None,
-        dynedge_layer_sizes: Optional[List[Tuple[int, ...]]] = None,
-        post_processing_layer_sizes: Optional[List[int]] = None,
-        readout_layer_sizes: Optional[List[int]] = None,
-        global_pooling_schemes: Optional[Union[str, List[str]]] = None,
-        add_global_variables_after_pooling: bool = False,
-    ):
-        """Construct `DynEdge`.
-
-        Args:
-            nb_inputs: Number of input features on each node.
-            nb_neighbours: Number of neighbours to be used in the k-nearest
-                neighbour clustering which is performed after each (dynamical)
-                edge convolution.
-            features_subset: The subset of latent features on each node that
-                are used as metric dimensions when performing the k-nearest
-                neighbours clustering. Defaults to [0,1,2].
-            dynedge_layer_sizes: The layer sizes, or latent feature dimensions,
-                used in the `DynEdgeConv` layer. Each entry in
-                `dynedge_layer_sizes` corresponds to a single `DynEdgeConv`
-                layer; the integers in the corresponding tuple correspond to
-                the layer sizes in the multi-layer perceptron (MLP) that is
-                applied within each `DynEdgeConv` layer. That is, a list of
-                size-two tuples means that all `DynEdgeConv` layers contain a
-                two-layer MLP.
-                Defaults to [(128, 256), (336, 256), (336, 256), (336, 256)].
-            post_processing_layer_sizes: Hidden layer sizes in the MLP
-                following the skip-concatenation of the outputs of each
-                `DynEdgeConv` layer. Defaults to [336, 256].
-            readout_layer_sizes: Hidden layer sizes in the MLP following the
-                post-processing _and_ optional global pooling. As this is the
-                last layer(s) in the model, the last layer in the read-out
-                yields the output of the `DynEdge` model. Defaults to [128,].
-            global_pooling_schemes: The list of global pooling schemes to use.
-                Options are: "min", "max", "mean", and "sum".
-            add_global_variables_after_pooling: Whether to add global variables
-                after global pooling. The alternative is to add (distribute)
-                them to the individual nodes before any convolutional
-                operations.
-        """
-        # Latent feature subset for computing nearest neighbours in DynEdge.
-        if features_subset is None:
-            features_subset = slice(0, 3)
-
-        # DynEdge layer sizes
-        if dynedge_layer_sizes is None:
-            dynedge_layer_sizes = [
-                (
-                    128,
-                    256,
-                ),
-                (
-                    336,
-                    256,
-                ),
-                (
-                    336,
-                    256,
-                ),
-                (
-                    336,
-                    256,
-                ),
-            ]
-
-        assert isinstance(dynedge_layer_sizes, list)
-        assert len(dynedge_layer_sizes)
-        assert all(isinstance(sizes, tuple) for sizes in dynedge_layer_sizes)
-        assert all(len(sizes) > 0 for sizes in dynedge_layer_sizes)
-        assert all(
-            all(size > 0 for size in sizes) for sizes in dynedge_layer_sizes
-        )
-
-        self._dynedge_layer_sizes = dynedge_layer_sizes
-
-        # Post-processing layer sizes
-        if post_processing_layer_sizes is None:
-            post_processing_layer_sizes = [
-                336,
-                256,
-            ]
-
-        assert isinstance(post_processing_layer_sizes, list)
-        assert len(post_processing_layer_sizes)
-        assert all(size > 0 for size in post_processing_layer_sizes)
-
-        self._post_processing_layer_sizes = post_processing_layer_sizes
-
-        # Read-out layer sizes
-        if readout_layer_sizes is None:
-            readout_layer_sizes = [
-                128,
-            ]
-
-        assert isinstance(readout_layer_sizes, list)
-        assert len(readout_layer_sizes)
-        assert all(size > 0 for size in readout_layer_sizes)
-
-        self._readout_layer_sizes = readout_layer_sizes
-
-        # Global pooling scheme(s)
-        if isinstance(global_pooling_schemes, str):
-            global_pooling_schemes = [global_pooling_schemes]
-
-        if isinstance(global_pooling_schemes, list):
-            for pooling_scheme in global_pooling_schemes:
-                assert (
-                    pooling_scheme in GLOBAL_POOLINGS
-                ), f"Global pooling scheme {pooling_scheme} not supported."
-        else:
-            assert global_pooling_schemes is None
-
-        self._global_pooling_schemes = global_pooling_schemes
-
-        if add_global_variables_after_pooling:
-            assert self._global_pooling_schemes, (
-                "No global pooling schemes were requested, so cannot add"
-                " global variables after pooling."
- ) - self._add_global_variables_after_pooling = ( - add_global_variables_after_pooling - ) - - # Base class constructor - super().__init__(nb_inputs, self._readout_layer_sizes[-1]) - - # Remaining member variables() - self._activation = torch.nn.LeakyReLU() - self._nb_inputs = nb_inputs - self._nb_global_variables = 5 + nb_inputs - self._nb_neighbours = nb_neighbours - self._features_subset = features_subset - - self._construct_layers() - - def _construct_layers(self) -> None: - """Construct layers (torch.nn.Modules).""" - # Convolutional operations - nb_input_features = self._nb_inputs - if not self._add_global_variables_after_pooling: - nb_input_features += self._nb_global_variables - - self._conv_layers = torch.nn.ModuleList() - nb_latent_features = nb_input_features - for sizes in self._dynedge_layer_sizes: - layers = [] - layer_sizes = [nb_latent_features] + list(sizes) - for ix, (nb_in, nb_out) in enumerate( - zip(layer_sizes[:-1], layer_sizes[1:]) - ): - if ix == 0: - nb_in *= 2 - layers.append(torch.nn.Linear(nb_in, nb_out)) - layers.append(self._activation) - - conv_layer = DynEdgeConv( - torch.nn.Sequential(*layers), - aggr="add", - nb_neighbors=self._nb_neighbours, - features_subset=self._features_subset, - ) - self._conv_layers.append(conv_layer) - - nb_latent_features = nb_out - - # Post-processing operations - nb_latent_features = ( - sum(sizes[-1] for sizes in self._dynedge_layer_sizes) - + nb_input_features - ) - - post_processing_layers = [] - layer_sizes = [nb_latent_features] + list( - self._post_processing_layer_sizes - ) - for nb_in, nb_out in zip(layer_sizes[:-1], layer_sizes[1:]): - post_processing_layers.append(torch.nn.Linear(nb_in, nb_out)) - post_processing_layers.append(self._activation) - - self._post_processing = torch.nn.Sequential(*post_processing_layers) - - # Read-out operations - nb_poolings = ( - len(self._global_pooling_schemes) - if self._global_pooling_schemes - else 1 - ) - nb_latent_features = nb_out * nb_poolings - if self._add_global_variables_after_pooling: - nb_latent_features += self._nb_global_variables - - readout_layers = [] - layer_sizes = [nb_latent_features] + list(self._readout_layer_sizes) - for nb_in, nb_out in zip(layer_sizes[:-1], layer_sizes[1:]): - readout_layers.append(torch.nn.Linear(nb_in, nb_out)) - readout_layers.append(self._activation) - - self._readout = torch.nn.Sequential(*readout_layers) - - def _global_pooling(self, x: Tensor, batch: LongTensor) -> Tensor: - """Perform global pooling.""" - assert self._global_pooling_schemes - pooled = [] - for pooling_scheme in self._global_pooling_schemes: - pooling_fn = GLOBAL_POOLINGS[pooling_scheme] - pooled_x = pooling_fn(x, index=batch, dim=0) - if isinstance(pooled_x, tuple) and len(pooled_x) == 2: - # `scatter_{min,max}`, which return also an argument, vs. - # `scatter_{mean,sum}` - pooled_x, _ = pooled_x - pooled.append(pooled_x) - - return torch.cat(pooled, dim=1) - - def _calculate_global_variables( - self, - x: Tensor, - edge_index: LongTensor, - batch: LongTensor, - *additional_attributes: Tensor, - ) -> Tensor: - """Calculate global variables.""" - # Calculate homophily (scalar variables) - h_x, h_y, h_z, h_t = calculate_xyzt_homophily(x, edge_index, batch) - - # Calculate mean features - global_means = scatter_mean(x, batch, dim=0) - - # Add global variables - global_variables = torch.cat( - [ - global_means, - h_x, - h_y, - h_z, - h_t, - ] - + [attr.unsqueeze(dim=1) for attr in additional_attributes], - dim=1, - ) - - return global_variables - -
-[docs] - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass.""" - # Convenience variables - x, edge_index, batch = data.x, data.edge_index, data.batch - - global_variables = self._calculate_global_variables( - x, - edge_index, - batch, - torch.log10(data.n_pulses), - ) - - # Distribute global variables out to each node - if not self._add_global_variables_after_pooling: - distribute = ( - batch.unsqueeze(dim=1) == torch.unique(batch).unsqueeze(dim=0) - ).type(torch.float) - - global_variables_distributed = torch.sum( - distribute.unsqueeze(dim=2) - * global_variables.unsqueeze(dim=0), - dim=1, - ) - - x = torch.cat((x, global_variables_distributed), dim=1) - - # DynEdge-convolutions - skip_connections = [x] - for conv_layer in self._conv_layers: - x, edge_index = conv_layer(x, edge_index, batch) - skip_connections.append(x) - - # Skip-cat - x = torch.cat(skip_connections, dim=1) - - # Post-processing - x = self._post_processing(x) - - # (Optional) Global pooling - if self._global_pooling_schemes: - x = self._global_pooling(x, batch=batch) - if self._add_global_variables_after_pooling: - x = torch.cat( - [ - x, - global_variables, - ], - dim=1, - ) - - # Read-out - x = self._readout(x) - - return x
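A minimal shape check with toy tensors, assuming the first four node features play the role of (x, y, z, t) expected by `calculate_xyzt_homophily`:

    import torch
    from torch_geometric.data import Data

    model = DynEdge(nb_inputs=7, global_pooling_schemes=["min", "max", "mean"])
    data = Data(
        x=torch.randn(10, 7),
        edge_index=torch.randint(0, 10, (2, 30)),
        batch=torch.zeros(10, dtype=torch.long),
    )
    data.n_pulses = torch.tensor([10.0])  # used via torch.log10 in forward
    out = model(data)  # shape: [1, 128], the last read-out layer size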
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/dynedge_jinst.html b/_modules/graphnet/models/gnn/dynedge_jinst.html
deleted file mode 100644
index 54e3e08fc..000000000
--- a/_modules/graphnet/models/gnn/dynedge_jinst.html
+++ /dev/null
@@ -1,519 +0,0 @@

Source code for graphnet.models.gnn.dynedge_jinst

-"""Implementation of the exact DynEdge architecture used in [2209.03042].
-
-Author: Rasmus Oersoe
-"""
-from typing import Optional
-
-import torch
-from torch import Tensor
-from torch_geometric.data import Data
-from torch_scatter import scatter_max, scatter_mean, scatter_min, scatter_sum
-
-from graphnet.models.components.layers import DynEdgeConv
-from graphnet.models.gnn.gnn import GNN
-from graphnet.models.utils import calculate_xyzt_homophily
-
-
-
-
-[docs]
-class DynEdgeJINST(GNN):
-    """DynEdge (dynamical edge convolutional) model used in [2209.03042]."""
-
-    def __init__(
-        self,
-        nb_inputs: int,
-        layer_size_scale: int = 4,
-    ):
-        """Construct `DynEdgeJINST`.
-
-        Args:
-            nb_inputs: Number of input features.
-            layer_size_scale: Integer that scales the size of hidden layers.
-        """
-        # Architecture configuration
-        c = layer_size_scale
-        l1, l2, l3, l4, l5, l6 = (
-            nb_inputs,
-            c * 16 * 2,
-            c * 32 * 2,
-            c * 42 * 2,
-            c * 32 * 2,
-            c * 16 * 2,
-        )
-
-        # Base class constructor
-        super().__init__(nb_inputs, l6)
-
-        # Graph convolutional operations
-        features_subset = slice(0, 3)
-        nb_neighbors = 8
-
-        self.conv_add1 = DynEdgeConv(
-            torch.nn.Sequential(
-                torch.nn.Linear(l1 * 2, l2),
-                torch.nn.LeakyReLU(),
-                torch.nn.Linear(l2, l3),
-                torch.nn.LeakyReLU(),
-            ),
-            aggr="add",
-            nb_neighbors=nb_neighbors,
-            features_subset=features_subset,
-        )
-
-        self.conv_add2 = DynEdgeConv(
-            torch.nn.Sequential(
-                torch.nn.Linear(l3 * 2, l4),
-                torch.nn.LeakyReLU(),
-                torch.nn.Linear(l4, l3),
-                torch.nn.LeakyReLU(),
-            ),
-            aggr="add",
-            nb_neighbors=nb_neighbors,
-            features_subset=features_subset,
-        )
-
-        self.conv_add3 = DynEdgeConv(
-            torch.nn.Sequential(
-                torch.nn.Linear(l3 * 2, l4),
-                torch.nn.LeakyReLU(),
-                torch.nn.Linear(l4, l3),
-                torch.nn.LeakyReLU(),
-            ),
-            aggr="add",
-            nb_neighbors=nb_neighbors,
-            features_subset=features_subset,
-        )
-
-        self.conv_add4 = DynEdgeConv(
-            torch.nn.Sequential(
-                torch.nn.Linear(l3 * 2, l4),
-                torch.nn.LeakyReLU(),
-                torch.nn.Linear(l4, l3),
-                torch.nn.LeakyReLU(),
-            ),
-            aggr="add",
-            nb_neighbors=nb_neighbors,
-            features_subset=features_subset,
-        )
-
-        # Post-processing operations
-        self.nn1 = torch.nn.Linear(l3 * 4 + l1, l4)
-        self.nn2 = torch.nn.Linear(l4, l5)
-        self.nn3 = torch.nn.Linear(4 * l5 + 5, l6)
-        self.lrelu = torch.nn.LeakyReLU()
-[docs] - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass.""" - # Convenience variables - x, edge_index, batch = data.x, data.edge_index, data.batch - - # Calculate homophily (scalar variables) - h_x, h_y, h_z, h_t = calculate_xyzt_homophily(x, edge_index, batch) - - a, edge_index = self.conv_add1(x, edge_index, batch) - b, edge_index = self.conv_add2(a, edge_index, batch) - c, edge_index = self.conv_add3(b, edge_index, batch) - d, edge_index = self.conv_add4(c, edge_index, batch) - - # Skip-cat - x = torch.cat((x, a, b, c, d), dim=1) - - # Post-processing - x = self.nn1(x) - x = self.lrelu(x) - x = self.nn2(x) - - # Aggregation across nodes - a, _ = scatter_max(x, batch, dim=0) - b, _ = scatter_min(x, batch, dim=0) - c = scatter_sum(x, batch, dim=0) - d = scatter_mean(x, batch, dim=0) - - # Concatenate aggregations and scalar features - x = torch.cat( - ( - a, - b, - c, - d, - h_t.reshape(-1, 1), - h_x.reshape(-1, 1), - h_y.reshape(-1, 1), - h_z.reshape(-1, 1), - data.n_pulses.reshape(-1, 1), - ), - dim=1, - ) - - # Read-out - x = self.lrelu(x) - x = self.nn3(x) - - x = self.lrelu(x) - - return x
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/dynedge_kaggle_tito.html b/_modules/graphnet/models/gnn/dynedge_kaggle_tito.html
deleted file mode 100644
index 046822eaf..000000000
--- a/_modules/graphnet/models/gnn/dynedge_kaggle_tito.html
+++ /dev/null
@@ -1,633 +0,0 @@

Source code for graphnet.models.gnn.dynedge_kaggle_tito

-"""Implementation of DynEdge architecture used in.
-
-                    IceCube - Neutrinos in Deep Ice
-Reconstruct the direction of neutrinos from the Universe to the South Pole
-
-Kaggle competition.
-
-Solution by TITO.
-"""
-
-from typing import List, Tuple, Optional, Union
-
-import torch
-from torch import Tensor, LongTensor
-
-from torch_geometric.data import Data
-from torch_scatter import scatter_max, scatter_mean, scatter_min, scatter_sum
-
-from graphnet.models.components.layers import DynTrans
-from graphnet.models.gnn.gnn import GNN
-from graphnet.models.utils import calculate_xyzt_homophily
-
-GLOBAL_POOLINGS = {
-    "min": scatter_min,
-    "max": scatter_max,
-    "sum": scatter_sum,
-    "mean": scatter_mean,
-}
-
-
-
-
-[docs]
-class DynEdgeTITO(GNN):
-    """DynEdgeTITO (dynamical edge convolutional with Transformer) model."""
-
-    def __init__(
-        self,
-        nb_inputs: int,
-        features_subset: Optional[List[int]] = None,
-        dyntrans_layer_sizes: Optional[List[Tuple[int, ...]]] = None,
-        global_pooling_schemes: List[str] = ["max"],
-        use_global_features: bool = True,
-        use_post_processing_layers: bool = True,
-    ):
-        """Construct `DynEdgeTITO`.
-
-        Args:
-            nb_inputs: Number of input features on each node.
-            features_subset: The subset of latent features on each node that
-                are used as metric dimensions when performing the k-nearest
-                neighbours clustering. Defaults to [0,1,2,3].
-            dyntrans_layer_sizes: The layer sizes, or latent feature
-                dimensions, used in the `DynTrans` layer.
-                Defaults to [(256, 256), (256, 256), (256, 256), (256, 256)].
-            global_pooling_schemes: The list of global pooling schemes to use.
-                Options are: "min", "max", "mean", and "sum".
-            use_global_features: Whether to use global features after pooling.
-            use_post_processing_layers: Whether to use post-processing layers
-                after the `DynTrans` layers.
-        """
-        # DynTrans layer sizes
-        if dyntrans_layer_sizes is None:
-            dyntrans_layer_sizes = [
-                (
-                    256,
-                    256,
-                ),
-                (
-                    256,
-                    256,
-                ),
-                (
-                    256,
-                    256,
-                ),
-                (
-                    256,
-                    256,
-                ),
-            ]
-
-        assert isinstance(dyntrans_layer_sizes, list)
-        assert len(dyntrans_layer_sizes)
-        assert all(isinstance(sizes, tuple) for sizes in dyntrans_layer_sizes)
-        assert all(len(sizes) > 0 for sizes in dyntrans_layer_sizes)
-        assert all(
-            all(size > 0 for size in sizes) for sizes in dyntrans_layer_sizes
-        )
-
-        self._dyntrans_layer_sizes = dyntrans_layer_sizes
-
-        # Post-processing layer sizes
-        post_processing_layer_sizes = [
-            336,
-            256,
-        ]
-
-        self._post_processing_layer_sizes = post_processing_layer_sizes
-
-        # Read-out layer sizes
-        readout_layer_sizes = [
-            256,
-            128,
-        ]
-
-        self._readout_layer_sizes = readout_layer_sizes
-
-        # Global pooling scheme(s)
-        if isinstance(global_pooling_schemes, str):
-            global_pooling_schemes = [global_pooling_schemes]
-
-        if isinstance(global_pooling_schemes, list):
-            for pooling_scheme in global_pooling_schemes:
-                assert (
-                    pooling_scheme in GLOBAL_POOLINGS
-                ), f"Global pooling scheme {pooling_scheme} not supported."
-        else:
-            assert global_pooling_schemes is None
-
-        self._global_pooling_schemes = global_pooling_schemes
-
-        assert self._global_pooling_schemes, (
-            "No global pooling schemes were requested, so cannot add global"
-            " variables after pooling."
- ) - - # Base class constructor - super().__init__(nb_inputs, self._readout_layer_sizes[-1]) - - # Remaining member variables() - self._activation = torch.nn.LeakyReLU() - self._nb_inputs = nb_inputs - self._nb_global_variables = 5 + nb_inputs - self._nb_neighbours = 8 - self._features_subset = features_subset or [0, 1, 2, 3] - self._use_global_features = use_global_features - self._use_post_processing_layers = use_post_processing_layers - self._construct_layers() - - def _construct_layers(self) -> None: - """Construct layers (torch.nn.Modules).""" - # Convolutional operations - nb_input_features = self._nb_inputs - - self._conv_layers = torch.nn.ModuleList() - nb_latent_features = nb_input_features - for sizes in self._dyntrans_layer_sizes: - conv_layer = DynTrans( - [nb_latent_features] + list(sizes), - aggr="max", - features_subset=self._features_subset, - n_head=8, - ) - self._conv_layers.append(conv_layer) - nb_latent_features = sizes[-1] - - if self._use_post_processing_layers: - post_processing_layers = [] - layer_sizes = [nb_latent_features] + list( - self._post_processing_layer_sizes - ) - for nb_in, nb_out in zip(layer_sizes[:-1], layer_sizes[1:]): - post_processing_layers.append(torch.nn.Linear(nb_in, nb_out)) - post_processing_layers.append(self._activation) - last_posting_layer_output_dim = nb_out - - self._post_processing = torch.nn.Sequential( - *post_processing_layers - ) - else: - last_posting_layer_output_dim = nb_latent_features - - # Read-out operations - nb_poolings = ( - len(self._global_pooling_schemes) - if self._global_pooling_schemes - else 1 - ) - nb_latent_features = last_posting_layer_output_dim * nb_poolings - if self._use_global_features: - nb_latent_features += self._nb_global_variables - - readout_layers = [] - layer_sizes = [nb_latent_features] + list(self._readout_layer_sizes) - for nb_in, nb_out in zip(layer_sizes[:-1], layer_sizes[1:]): - readout_layers.append(torch.nn.Linear(nb_in, nb_out)) - readout_layers.append(self._activation) - - self._readout = torch.nn.Sequential(*readout_layers) - - def _global_pooling(self, x: Tensor, batch: LongTensor) -> Tensor: - """Perform global pooling.""" - assert self._global_pooling_schemes - pooled = [] - for pooling_scheme in self._global_pooling_schemes: - pooling_fn = GLOBAL_POOLINGS[pooling_scheme] - pooled_x = pooling_fn(x, index=batch, dim=0) - if isinstance(pooled_x, tuple) and len(pooled_x) == 2: - # `scatter_{min,max}`, which return also an argument, vs. - # `scatter_{mean,sum}` - pooled_x, _ = pooled_x - pooled.append(pooled_x) - - return torch.cat(pooled, dim=1) - - def _calculate_global_variables( - self, - x: Tensor, - edge_index: LongTensor, - batch: LongTensor, - *additional_attributes: Tensor, - ) -> Tensor: - """Calculate global variables.""" - # Calculate homophily (scalar variables) - h_x, h_y, h_z, h_t = calculate_xyzt_homophily(x, edge_index, batch) - - # Calculate mean features - global_means = scatter_mean(x, batch, dim=0) - - # Add global variables - global_variables = torch.cat( - [ - global_means, - h_x, - h_y, - h_z, - h_t, - ] - + [attr.unsqueeze(dim=1) for attr in additional_attributes], - dim=1, - ) - - return global_variables - -
-[docs] - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass.""" - # Convenience variables - x, edge_index, batch = data.x, data.edge_index, data.batch - - if self._use_global_features: - global_variables = self._calculate_global_variables( - x, - edge_index, - batch, - torch.log10(data.n_pulses), - ) - - # DynEdge-convolutions - for conv_layer in self._conv_layers: - x = conv_layer(x, edge_index, batch) - - # Post-processing - if self._use_post_processing_layers: - x = self._post_processing(x) - - x = self._global_pooling(x, batch=batch) - if self._use_global_features: - x = torch.cat( - [ - x, - global_variables, - ], - dim=1, - ) - - # Read-out - x = self._readout(x) - - return x
\ No newline at end of file
diff --git a/_modules/graphnet/models/gnn/gnn.html b/_modules/graphnet/models/gnn/gnn.html
deleted file mode 100644
index 4f0a11b6d..000000000
--- a/_modules/graphnet/models/gnn/gnn.html
+++ /dev/null
@@ -1,401 +0,0 @@

Source code for graphnet.models.gnn.gnn

-"""Base GNN-specific `Model` class(es)."""
-
-from abc import abstractmethod
-
-from torch import Tensor
-from torch_geometric.data import Data
-
-from graphnet.models import Model
-
-
-
-[docs] -class GNN(Model): - """Base class for all core GNN models in graphnet.""" - - def __init__(self, nb_inputs: int, nb_outputs: int) -> None: - """Construct `GNN`.""" - # Base class constructor - super().__init__() - - # Member variables - self._nb_inputs = nb_inputs - self._nb_outputs = nb_outputs - - @property - def nb_inputs(self) -> int: - """Return number of input features.""" - return self._nb_inputs - - @property - def nb_outputs(self) -> int: - """Return number of output features.""" - return self._nb_outputs - -
-[docs] - @abstractmethod - def forward(self, data: Data) -> Tensor: - """Apply learnable forward pass in model."""
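A hypothetical minimal subclass, purely to illustrate the contract (not a model shipped with the library):

    from torch import Tensor
    from torch_geometric.data import Data
    from torch_geometric.nn import global_mean_pool

    class MeanPoolGNN(GNN):
        """Toy GNN: per-event mean of node features, no learnable weights."""

        def __init__(self, nb_inputs: int) -> None:
            super().__init__(nb_inputs, nb_inputs)  # outputs == inputs here

        def forward(self, data: Data) -> Tensor:
            return global_mean_pool(data.x, data.batch)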
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/edges/edges.html b/_modules/graphnet/models/graphs/edges/edges.html
deleted file mode 100644
index c8778c6a0..000000000
--- a/_modules/graphnet/models/graphs/edges/edges.html
+++ /dev/null
@@ -1,559 +0,0 @@

Source code for graphnet.models.graphs.edges.edges

-"""Class(es) for building/connecting graphs."""
-
-from typing import List
-from abc import abstractmethod, ABC
-
-import torch
-from torch_geometric.nn import knn_graph, radius_graph
-from torch_geometric.data import Data
-
-from graphnet.models.utils import calculate_distance_matrix
-from graphnet.models import Model
-
-
-
-[docs] -class EdgeDefinition(Model): # pylint: disable=too-few-public-methods - """Base class for graph building.""" - -
-
[docs]
-    def forward(self, graph: Data) -> Data:
-        """Construct edges based on the problem-specific implementation
-        of ´_construct_edges´.
-
-        Args:
-            graph: a graph without edges
-
-        Returns:
-            graph: a graph with edges
-        """
-        if graph.edge_index is not None:
-            self.warning_once(
-                "GraphBuilder received graph with pre-existing "
-                "structure. Will overwrite."
-            )
-        return self._construct_edges(graph)
- - - @abstractmethod - def _construct_edges(self, graph: Data) -> Data: - """Construct edges and assign them to graph. I.e. ´graph.edge_index = edge_index´. - - Args: - graph: graph without edges - - Returns: - graph: graph with edges assigned. - """
- - - -
-[docs] -class KNNEdges(EdgeDefinition): # pylint: disable=too-few-public-methods - """Builds edges from the k-nearest neighbours.""" - - def __init__( - self, - nb_nearest_neighbours: int, - columns: List[int] = [0, 1, 2], - ): - """K-NN Edge definition. - - Will connect nodes together with their ´nb_nearest_neighbours´ - nearest neighbours in the feature space given by ´columns´. - - Args: - nb_nearest_neighbours: number of neighbours. - columns: Node features to use for distance calculation. - Defaults to [0,1,2]. - """ - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - # Member variable(s) - self._nb_nearest_neighbours = nb_nearest_neighbours - self._columns = columns - - def _construct_edges(self, graph: Data) -> Data: - """Define K-NN edges.""" - graph.edge_index = knn_graph( - graph.x[:, self._columns], - self._nb_nearest_neighbours, - graph.batch, - ).to(self.device) - - return graph
- - - -
-[docs] -class RadialEdges(EdgeDefinition): - """Builds graph from a sphere of chosen radius centred at each node.""" - - def __init__( - self, - radius: float, - columns: List[int] = [0, 1, 2], - ): - """Radial edges. - - Connects each node to other nodes that are within a sphere of - radius ´r´ centered at the node. The feature space of ´r´ is defined - by ´columns´ - - Args: - radius: radius of sphere - columns: columns of the node feature matrix used. - Defaults to [0,1,2]. - """ - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - # Member variable(s) - self._radius = radius - self._columns = columns - - def _construct_edges(self, graph: Data) -> Data: - """Define radial edges.""" - graph.edge_index = radius_graph( - graph.x[:, self._columns], - self._radius, - graph.batch, - ).to(self.device) - - return graph
- - - -
-
[docs]
-class EuclideanEdges(EdgeDefinition):  # pylint: disable=too-few-public-methods
-    """Builds edges according to Euclidean distance between nodes.
-
-    See https://arxiv.org/pdf/1809.06166.pdf.
-    """
-
-    def __init__(
-        self,
-        sigma: float,
-        threshold: float = 0.0,
-        columns: List[int] = None,
-    ):
-        """Construct `EuclideanEdges`."""
-        # Base class constructor
-        super().__init__(name=__name__, class_name=self.__class__.__name__)
-
-        # Check(s)
-        if columns is None:
-            columns = [0, 1, 2]
-
-        # Member variable(s)
-        self._sigma = sigma
-        self._threshold = threshold
-        self._columns = columns
-
-    def _construct_edges(self, graph: Data) -> Data:
-        """Define Euclidean edges."""
-        # Constructs the adjacency matrix from the raw, DOM-level data and
-        # returns this matrix
-        if graph.edge_index is not None:
-            self.info(
-                "WARNING: GraphBuilder received graph with pre-existing "
-                "structure. Will overwrite."
-            )
-
-        xyz_coords = graph.x[:, self._columns]
-
-        # Construct block-diagonal matrix indicating whether pulses belong to
-        # the same event in the batch
-        batch_mask = graph.batch.unsqueeze(dim=0) == graph.batch.unsqueeze(
-            dim=1
-        )
-
-        distance_matrix = calculate_distance_matrix(xyz_coords)
-        affinity_matrix = torch.exp(
-            -0.5 * distance_matrix**2 / self._sigma**2
-        )
-
-        # Use softmax to normalise all adjacencies to one for each node
-        exp_row_sums = torch.exp(affinity_matrix).sum(axis=1)
-        weighted_adj_matrix = torch.exp(
-            affinity_matrix
-        ) / exp_row_sums.unsqueeze(dim=1)
-
-        # Only include edges with weights that exceed the chosen threshold
-        # (and are part of the same event)
-        sources, targets = torch.where(
-            (weighted_adj_matrix > self._threshold) & (batch_mask)
-        )
-        edge_weights = weighted_adj_matrix[sources, targets]
-
-        graph.edge_index = torch.stack((sources, targets))
-        graph.edge_weight = edge_weights
-
-        return graph
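A short sketch of how one of these edge definitions is applied to a graph (sizes are made up; the first three feature columns are assumed to be xyz):

import torch
from torch_geometric.data import Data

from graphnet.models.graphs.edges import KNNEdges

edge_definition = KNNEdges(nb_nearest_neighbours=3, columns=[0, 1, 2])
graph = Data(x=torch.randn(5, 4))
graph.batch = torch.zeros(5, dtype=torch.long)
graph = edge_definition(graph)  # graph.edge_index now has shape [2, 5 * 3]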
-
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/edges/minkowski.html b/_modules/graphnet/models/graphs/edges/minkowski.html
deleted file mode 100644
index e16af71d8..000000000
--- a/_modules/graphnet/models/graphs/edges/minkowski.html
+++ /dev/null
@@ -1,464 +0,0 @@

Source code for graphnet.models.graphs.edges.minkowski

-"""Module containing EdgeDefinitions based on the Minkowski Metric."""
-from typing import Optional, List
-
-import torch
-from torch_geometric.data import Data
-from torch_geometric.utils import to_dense_batch
-from graphnet.models.graphs.edges.edges import EdgeDefinition
-
-
-
-
[docs]
-def compute_minkowski_distance_mat(
-    x: torch.Tensor,
-    y: torch.Tensor,
-    c: float,
-    space_coords: Optional[List[int]] = None,
-    time_coord: Optional[int] = 3,
-) -> torch.Tensor:
-    """Compute all pairwise Minkowski distances.
-
-    Args:
-        x: First tensor of shape (n, d).
-        y: Second tensor of shape (m, d).
-        c: Speed of light, in scaled units.
-        space_coords: Indices of space coordinates.
-        time_coord: Index of time coordinate.
-
-    Returns: Matrix of shape (n, m) of all pairwise Minkowski distances.
-    """
-    space_coords = space_coords or [0, 1, 2]
-    assert x.dim() == 2, "x must be 2-dimensional"
-    assert y.dim() == 2, "y must be 2-dimensional"
-    dist = x[:, None] - y[None, :]
-    pos = dist[:, :, space_coords]
-    time = dist[:, :, time_coord] * c
-    return (pos**2).sum(dim=-1) - time**2
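As a worked example of the metric (values made up, c scaled to 1): a spatial separation of 1 and a time separation of 2 give a negative, i.e. time-like, squared interval:

import torch

from graphnet.models.graphs.edges.minkowski import (
    compute_minkowski_distance_mat,
)

x = torch.tensor(
    [[0.0, 0.0, 0.0, 0.0],
     [1.0, 0.0, 0.0, 2.0]]  # columns: x, y, z, t
)
dist = compute_minkowski_distance_mat(x, x, c=1.0)
# dist[0, 1] = (1**2 + 0 + 0) - (1.0 * 2)**2 = -3.0  -> time-like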
- - - -
-[docs] -class MinkowskiKNNEdges(EdgeDefinition): - """Builds edges between most light-like separated.""" - - def __init__( - self, - nb_nearest_neighbours: int, - c: float, - time_like_weight: float = 1.0, - space_coords: Optional[List[int]] = None, - time_coord: Optional[int] = 3, - ): - """Initialize MinkowskiKNNEdges. - - Args: - nb_nearest_neighbours: Number of neighbours to connect to. - c: Speed of light, in scaled units. - time_like_weight: Preference to time-like over space-like edges. - Scales time_like distances by this value, before finding - nearest neighbours. - space_coords: Coordinates of x, y, z. - time_coord: Coordinate of time. - """ - super().__init__(name=__name__, class_name=self.__class__.__name__) - self.nb_nearest_neighbours = nb_nearest_neighbours - self.c = c - self.time_like_weight = time_like_weight - self.space_coords = space_coords or [0, 1, 2] - self.time_coord = time_coord - - def _construct_edges(self, graph: Data) -> Data: - x, mask = to_dense_batch(graph.x, graph.batch) - count = 0 - row = [] - col = [] - for batch in range(x.shape[0]): - distance_mat = compute_minkowski_distance_mat( - x_masked := x[batch][mask[batch]], - x_masked, - self.c, - self.space_coords, - self.time_coord, - ) - num_points = x_masked.shape[0] - num_edges = min(self.nb_nearest_neighbours, num_points) - col += [ - c - for c in range(num_points) - for _ in range(count, count + num_edges) - ] - distance_mat[distance_mat < 0] *= -self.time_like_weight - distance_mat += ( - torch.eye(distance_mat.shape[0]) * 1e9 - ) # self-loops - distance_sorted = distance_mat.argsort(dim=1) - distance_sorted += count # offset by previous events - row += distance_sorted[:num_edges].flatten().tolist() - count += num_points - - graph.edge_index = torch.tensor( - [row, col], dtype=torch.long, device=graph.x.device - ) - return graph
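And a usage sketch for the class above; the value of `c` depends on the units of your coordinates and is an assumption here:

import torch
from torch_geometric.data import Data

from graphnet.models.graphs.edges.minkowski import MinkowskiKNNEdges

edges = MinkowskiKNNEdges(nb_nearest_neighbours=2, c=0.3)  # e.g. m/ns
graph = Data(x=torch.randn(6, 4))  # columns: x, y, z, t
graph.batch = torch.zeros(6, dtype=torch.long)
graph = edges(graph)  # populates graph.edge_index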
-
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/graph_definition.html b/_modules/graphnet/models/graphs/graph_definition.html
deleted file mode 100644
index 141e4a4e4..000000000
--- a/_modules/graphnet/models/graphs/graph_definition.html
+++ /dev/null
@@ -1,815 +0,0 @@

Source code for graphnet.models.graphs.graph_definition

-"""Modules for defining graphs.
-
-These are self-contained graph definitions that hold all the graph-altering
-code in graphnet. These modules define what the GNN sees as input and can be
-passed to dataloaders during training and deployment.
-"""
-
-
-from typing import Any, List, Optional, Dict, Callable, Union
-import torch
-from torch_geometric.data import Data
-import numpy as np
-from numpy.random import default_rng, Generator
-
-from graphnet.models.detector import Detector
-from .edges import EdgeDefinition
-from .nodes import NodeDefinition, NodesAsPulses
-from graphnet.models import Model
-
-
-
-[docs] -class GraphDefinition(Model): - """An Abstract class to create graph definitions from.""" - - def __init__( - self, - detector: Detector, - node_definition: NodeDefinition = NodesAsPulses(), - edge_definition: Optional[EdgeDefinition] = None, - input_feature_names: Optional[List[str]] = None, - dtype: Optional[torch.dtype] = torch.float, - perturbation_dict: Optional[Dict[str, float]] = None, - seed: Optional[Union[int, Generator]] = None, - add_inactive_sensors: bool = False, - sensor_mask: Optional[List[int]] = None, - string_mask: Optional[List[int]] = None, - sort_by: str = None, - ): - """Construct ´GraphDefinition´. The ´detector´ holds. - - ´Detector´-specific code. E.g. scaling/standardization and geometry - tables. - - ´node_definition´ defines the nodes in the graph. - - ´edge_definition´ defines the connectivity of the nodes in the graph. - - Args: - detector: The corresponding ´Detector´ representing the data. - node_definition: Definition of nodes. Defaults to NodesAsPulses. - edge_definition: Definition of edges. Defaults to None. - input_feature_names: Names of each column in expected input data - that will be built into a graph. If not provided, - it is automatically assumed that all features in `Detector` is - used. - dtype: data type used for node features. e.g. ´torch.float´ - perturbation_dict: Dictionary mapping a feature name to a standard - deviation according to which the values for this - feature should be randomly perturbed. Defaults - to None. - seed: seed or Generator used to randomly sample perturbations. - Defaults to None. - add_inactive_sensors: If True, inactive sensors will be appended - to the graph with padded pulse information. Defaults to False. - sensor_mask: A list of sensor id's to be masked from the graph. Any - sensor listed here will be removed from the graph. Defaults to None. - string_mask: A list of string id's to be masked from the graph. Defaults to None. - sort_by: Name of node feature to sort by. Defaults to None. - """ - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - # Member Variables - self._detector = detector - self._edge_definition = edge_definition - self._node_definition = node_definition - self._perturbation_dict = perturbation_dict - self._sensor_mask = sensor_mask - self._string_mask = string_mask - self._add_inactive_sensors = add_inactive_sensors - - self._resolve_masks() - - if self._edge_definition is None: - self.warning_once( - """No EdgeDefinition given. Graphs will not have edges!""" - ) - - if input_feature_names is None: - # Assume all features in Detector is used. - input_feature_names = list(self._detector.feature_map().keys()) # type: ignore - self._input_feature_names = input_feature_names - - # Set input data column names for node definition - self._node_definition.set_output_feature_names( - self._input_feature_names - ) - self.output_feature_names = self._node_definition._output_feature_names - - # Sorting - if sort_by is not None: - assert isinstance(sort_by, str) - try: - sort_by = self.output_feature_names.index(sort_by) # type: ignore - except ValueError as e: - self.error( - f"{sort_by} not in node features {self.output_feature_names}." 
- ) - raise e - self._sort_by = sort_by - # Set data type - self.to(dtype) - - # Set Input / Output dimensions - self._node_definition.set_number_of_inputs( - input_feature_names=input_feature_names - ) - self.nb_inputs = len(self._input_feature_names) - self.nb_outputs = self._node_definition.nb_outputs - - # Set perturbation_cols if needed - if isinstance(self._perturbation_dict, dict): - self._perturbation_cols = [ - self._input_feature_names.index(key) - for key in self._perturbation_dict.keys() - ] - if seed is not None: - if isinstance(seed, int): - self.rng = default_rng(seed) - elif isinstance(seed, Generator): - self.rng = seed - else: - raise ValueError( - "Invalid seed. Must be an int or a numpy Generator." - ) - else: - self.rng = default_rng() - -
-[docs] - def forward( # type: ignore - self, - input_features: np.ndarray, - input_feature_names: List[str], - truth_dicts: Optional[List[Dict[str, Any]]] = None, - custom_label_functions: Optional[Dict[str, Callable[..., Any]]] = None, - loss_weight_column: Optional[str] = None, - loss_weight: Optional[float] = None, - loss_weight_default_value: Optional[float] = None, - data_path: Optional[str] = None, - ) -> Data: - """Construct graph as ´Data´ object. - - Args: - input_features: Input features for graph construction. Shape ´[num_rows, d]´ - input_feature_names: name of each column. Shape ´[,d]´. - truth_dicts: Dictionary containing truth labels. - custom_label_functions: Custom label functions. See https://github.com/graphnet-team/graphnet/blob/main/GETTING_STARTED.md#adding-custom-truth-labels. - loss_weight_column: Name of column that holds loss weight. - Defaults to None. - loss_weight: Loss weight associated with event. Defaults to None. - loss_weight_default_value: default value for loss weight. - Used in instances where some events have - no pre-defined loss weight. Defaults to None. - data_path: Path to dataset data files. Defaults to None. - - Returns: - graph - """ - # Checks - self._validate_input( - input_features=input_features, - input_feature_names=input_feature_names, - ) - - # Add inactive sensors if `add_inactive_sensors = True` - if self._add_inactive_sensors: - input_features = self._attach_inactive_sensors( - input_features, input_feature_names - ) - - # Mask out sensors if `sensor_mask` is given - if self._sensor_mask is not None: - input_features = self._mask_sensors( - input_features, input_feature_names - ) - - # Gaussian perturbation of each column if perturbation dict is given - input_features = self._perturb_input(input_features) - - # Transform to pytorch tensor - input_features = torch.tensor(input_features, dtype=self.dtype) - - # Standardize / Scale node features - input_features = self._detector(input_features, input_feature_names) - - # Create graph & get new node feature names - graph, node_feature_names = self._node_definition(input_features) - if self._sort_by is not None: - graph.x = graph.x[graph.x[:, self._sort_by].sort()[1]] - - # Enforce dtype - graph.x = graph.x.type(self.dtype) - - # Attach number of pulses as static attribute. - graph.n_pulses = torch.tensor(len(input_features), dtype=torch.int32) - - # Assign edges - if self._edge_definition is not None: - graph = self._edge_definition(graph) - - # Attach data path - useful for Ensemble datasets. - if data_path is not None: - graph["dataset_path"] = data_path - - # Attach loss weights if they exist - graph = self._add_loss_weights( - graph=graph, - loss_weight=loss_weight, - loss_weight_column=loss_weight_column, - loss_weight_default_value=loss_weight_default_value, - ) - - # Attach default truth labels and node truths - if truth_dicts is not None: - graph = self._add_truth(graph=graph, truth_dicts=truth_dicts) - - # Attach custom truth labels - if custom_label_functions is not None: - graph = self._add_custom_labels( - graph=graph, custom_label_functions=custom_label_functions - ) - - # Attach node features as seperate fields. MAY NOT CONTAIN 'x' - graph = self._add_features_individually( - graph=graph, node_feature_names=node_feature_names - ) - - # Add GraphDefinition Stamp - graph["graph_definition"] = self.__class__.__name__ - return graph
- - - def _resolve_masks(self) -> None: - """Handle cases with sensor/string masks.""" - if self._sensor_mask is not None: - if self._string_mask is not None: - assert ( - 1 == 2 - ), """Got arguments for both `sensor_mask`and `string_mask`. Please specify only one. """ - - if (self._sensor_mask is None) & (self._string_mask is not None): - self._sensor_mask = self._convert_string_to_sensor_mask() - - return - - def _convert_string_to_sensor_mask(self) -> List[int]: - """Convert a string mask to a sensor mask.""" - string_id_column = self._detector.string_id_column - sensor_id_column = self._detector.sensor_id_column - geometry_table = self._detector.geometry_table - idx = geometry_table[string_id_column].isin(self._string_mask) - return np.asarray(geometry_table.loc[idx, sensor_id_column]).tolist() - - def _attach_inactive_sensors( - self, input_features: np.ndarray, input_feature_names: List[str] - ) -> np.ndarray: - """Attach inactive sensors to `input_features`. - - This function will query the detector geometry table and add any sensor - in the geometry table that is not already present in `node_features`. - """ - lookup = self._geometry_table_lookup( - input_features, input_feature_names - ) - geometry_table = self._detector.geometry_table - unique_sensors = geometry_table.reset_index(drop=True) - - # multiple lines to avoid long line: - inactive_idx = ~geometry_table.index.isin(lookup) - inactive_sensors = unique_sensors.loc[ - inactive_idx, input_feature_names - ] - input_features = np.concatenate( - [input_features, inactive_sensors.to_numpy()], axis=0 - ) - return input_features - - def _mask_sensors( - self, input_features: np.ndarray, input_feature_names: List[str] - ) -> np.ndarray: - """Mask sensors according to `sensor_mask`.""" - sensor_id_column = self._detector.sensor_index_name - geometry_table = self._detector.geometry_table - - lookup = self._geometry_table_lookup( - input_features=input_features, - input_feature_names=input_feature_names, - ) - mask = ~geometry_table.loc[lookup, sensor_id_column].isin( - self._sensor_mask - ) - - return input_features[mask, :] - - def _geometry_table_lookup( - self, input_features: np.ndarray, input_feature_names: List[str] - ) -> np.ndarray: - """Convert xyz in `input_features` into a set of sensor ids.""" - lookup_columns = [ - input_feature_names.index(feature) - for feature in self._detector.sensor_position_names - ] - idx = [*zip(*[tuple(input_features[:, k]) for k in lookup_columns])] - return self._detector.geometry_table.loc[idx, :].index - - def _validate_input( - self, input_features: np.array, input_feature_names: List[str] - ) -> None: - # node feature matrix dimension check - assert input_features.shape[1] == len(input_feature_names) - - # check that provided features for input is the same that the ´Graph´ - # was instantiated with. - assert len(input_feature_names) == len( - self._input_feature_names - ), f"""Input features ({input_feature_names}) is not what - {self.__class__.__name__} was instatiated - with ({self._input_feature_names})""" # noqa - for idx in range(len(input_feature_names)): - assert ( - input_feature_names[idx] == self._input_feature_names[idx] - ), f""" Order of node features in data - are not the same as expected. Got {input_feature_names} - vs. 
{self._input_feature_names}""" # noqa - - def _perturb_input(self, input_features: np.ndarray) -> np.ndarray: - if isinstance(self._perturbation_dict, dict): - self.warning_once( - f"""Will randomly perturb - {list(self._perturbation_dict.keys())} - using stds {self._perturbation_dict.values()}""" # noqa - ) - perturbed_features = self.rng.normal( - loc=input_features[:, self._perturbation_cols], - scale=np.array( - list(self._perturbation_dict.values()), dtype=float - ), - ) - input_features[:, self._perturbation_cols] = perturbed_features - return input_features - - def _add_loss_weights( - self, - graph: Data, - loss_weight_column: Optional[str] = None, - loss_weight: Optional[float] = None, - loss_weight_default_value: Optional[float] = None, - ) -> Data: - """Attempt to store a loss weight in the graph for use during training. - - I.e. `graph[loss_weight_column] = loss_weight` - - Args: - loss_weight: The non-negative weight to be stored. - graph: Data object representing the event. - loss_weight_column: The name under which the weight is stored in - the graph. - loss_weight_default_value: The default value used if - none was retrieved. - - Returns: - A graph with loss weight added, if available. - """ - # Add loss weight to graph. - if loss_weight is not None and loss_weight_column is not None: - # No loss weight was retrieved, i.e., it is missing for the current - # event. - if loss_weight < 0: - if loss_weight_default_value is None: - raise ValueError( - "At least one event is missing an entry in " - f"{loss_weight_column} " - "but loss_weight_default_value is None." - ) - graph[loss_weight_column] = torch.tensor( - self._loss_weight_default_value, dtype=self.dtype - ).reshape(-1, 1) - else: - graph[loss_weight_column] = torch.tensor( - loss_weight, dtype=self.dtype - ).reshape(-1, 1) - return graph - - def _add_truth( - self, graph: Data, truth_dicts: List[Dict[str, Any]] - ) -> Data: - """Add truth labels from ´truth_dicts´ to ´graph´. - - I.e. ´graph[key] = truth_dict[key]´ - - - Args: - graph: graph where the label will be stored - truth_dicts: dictionary containing the labels - - Returns: - graph with labels - """ - # Write attributes, either target labels, truth info or original - # features. - for truth_dict in truth_dicts: - for key, value in truth_dict.items(): - try: - graph[key] = torch.tensor(value) - except TypeError: - # Cannot convert `value` to Tensor due to its data type, - # e.g. `str`. - self.debug( - ( - f"Could not assign `{key}` with type " - f"'{type(value).__name__}' as attribute to graph." - ) - ) - return graph - - def _add_features_individually( - self, - graph: Data, - node_feature_names: List[str], - ) -> Data: - # Additionally add original features as (static) attributes - graph.features = node_feature_names - for index, feature in enumerate(node_feature_names): - if feature not in ["x"]: # reserved for node features. - graph[feature] = graph.x[:, index].detach() - else: - self.warning_once( - """Cannot assign graph['x']. This field is reserved for - node features. Please rename your input feature.""" - ) # noqa - - return graph - - def _add_custom_labels( - self, - graph: Data, - custom_label_functions: Dict[str, Callable[..., Any]], - ) -> Data: - # Add custom labels to the graph - for key, fn in custom_label_functions.items(): - graph[key] = fn(graph) - return graph
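To make the data flow concrete, a sketch of building a `GraphDefinition` and calling it on raw pulses. The `IceCube86` detector and its feature names are assumptions for illustration:

import numpy as np

from graphnet.models.detector.icecube import IceCube86  # assumed detector
from graphnet.models.graphs import GraphDefinition
from graphnet.models.graphs.edges import KNNEdges

features = ["dom_x", "dom_y", "dom_z", "dom_time", "charge", "rde", "pmt_area"]
graph_definition = GraphDefinition(
    detector=IceCube86(),
    edge_definition=KNNEdges(nb_nearest_neighbours=8),
    input_feature_names=features,
)
pulses = np.random.rand(12, len(features))  # 12 dummy pulses
graph = graph_definition(
    input_features=pulses, input_feature_names=features
)  # -> torch_geometric Data object with x, edge_index, n_pulses, ...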
-
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/graphs.html b/_modules/graphnet/models/graphs/graphs.html
deleted file mode 100644
index 7e77b2ccc..000000000
--- a/_modules/graphnet/models/graphs/graphs.html
+++ /dev/null
@@ -1,419 +0,0 @@

Source code for graphnet.models.graphs.graphs

-"""A module containing different graph representations in GraphNeT."""
-
-from typing import List, Optional, Dict, Union
-import torch
-from numpy.random import Generator
-
-from .graph_definition import GraphDefinition
-from graphnet.models.detector import Detector
-from graphnet.models.graphs.edges import EdgeDefinition, KNNEdges
-from graphnet.models.graphs.nodes import NodeDefinition, NodesAsPulses
-
-
-
-[docs] -class KNNGraph(GraphDefinition): - """A Graph representation where Edges are drawn to nearest neighbours.""" - - def __init__( - self, - detector: Detector, - node_definition: NodeDefinition = None, - input_feature_names: Optional[List[str]] = None, - dtype: Optional[torch.dtype] = torch.float, - perturbation_dict: Optional[Dict[str, float]] = None, - seed: Optional[Union[int, Generator]] = None, - nb_nearest_neighbours: int = 8, - columns: List[int] = [0, 1, 2], - ) -> None: - """Construct k-nn graph representation. - - Args: - detector: Detector that represents your data. - node_definition: Definition of nodes in the graph. - input_feature_names: Name of input feature columns. - dtype: data type for node features. - perturbation_dict: Dictionary mapping a feature name to a standard - deviation according to which the values for this - feature should be randomly perturbed. Defaults - to None. - seed: seed or Generator used to randomly sample perturbations. - Defaults to None. - nb_nearest_neighbours: Number of edges for each node. Defaults to 8. - columns: node feature columns used for distance calculation - . Defaults to [0, 1, 2]. - """ - # Base class constructor - super().__init__( - detector=detector, - node_definition=node_definition or NodesAsPulses(), - edge_definition=KNNEdges( - nb_nearest_neighbours=nb_nearest_neighbours, - columns=columns, - ), - dtype=dtype, - input_feature_names=input_feature_names, - perturbation_dict=perturbation_dict, - seed=seed, - )
- -
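Rather than assembling a `GraphDefinition` by hand, the convenience class above bundles `NodesAsPulses` with `KNNEdges`. A sketch (detector choice again an assumption):

from graphnet.models.detector.icecube import IceCube86  # assumed detector
from graphnet.models.graphs import KNNGraph

graph_definition = KNNGraph(
    detector=IceCube86(),
    nb_nearest_neighbours=8,
    columns=[0, 1, 2],  # xyz columns used for the k-NN search
)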
-
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/nodes/nodes.html b/_modules/graphnet/models/graphs/nodes/nodes.html
deleted file mode 100644
index 0a3da4c5c..000000000
--- a/_modules/graphnet/models/graphs/nodes/nodes.html
+++ /dev/null
@@ -1,591 +0,0 @@

Source code for graphnet.models.graphs.nodes.nodes

-"""Class(es) for building/connecting graphs."""
-
-from typing import List, Tuple, Optional
-from abc import abstractmethod
-
-import torch
-from torch_geometric.data import Data
-
-from graphnet.utilities.decorators import final
-from graphnet.models import Model
-from graphnet.models.graphs.utils import (
-    cluster_summarize_with_percentiles,
-    identify_indices,
-)
-from copy import deepcopy
-
-
-
-
[docs]
-class NodeDefinition(Model):  # pylint: disable=too-few-public-methods
-    """Base class for defining graph nodes."""
-
-    def __init__(
-        self, input_feature_names: Optional[List[str]] = None
-    ) -> None:
-        """Construct ´NodeDefinition´."""
-        # Base class constructor
-        super().__init__(name=__name__, class_name=self.__class__.__name__)
-        if input_feature_names is not None:
-            self.set_output_feature_names(
-                input_feature_names=input_feature_names
-            )
-
-
[docs]
-    @final
-    def forward(self, x: torch.tensor) -> Tuple[Data, List[str]]:
-        """Construct nodes from raw node features.
-
-        Args:
-            x: standardized node features with shape ´[num_pulses, d]´,
-                where ´d´ is the number of node features.
-
-        Returns:
-            graph: a graph without edges.
-            new_features_name: list of new feature names.
-        """
-        graph = self._construct_nodes(x=x)
-        try:
-            self._output_feature_names
-        except AttributeError as e:
-            self.error(
-                f"""{self.__class__.__name__} was instantiated without
-                `input_feature_names` and it was not set prior to this
-                forward call. If you are using this class outside a
-                `GraphDefinition`, please instantiate
-                with `input_feature_names`."""
-            )  # noqa
-            raise e
-        return graph, self._output_feature_names
- - - @property - def nb_outputs(self) -> int: - """Return number of output features. - - This the default, but may be overridden by specific inheriting classes. - """ - return len(self._output_feature_names) - -
-[docs] - @final - def set_number_of_inputs(self, input_feature_names: List[str]) -> None: - """Return number of inputs expected by node definition. - - Args: - input_feature_names: name of each input feature column. - """ - assert isinstance(input_feature_names, list) - self.nb_inputs = len(input_feature_names)
- - -
-[docs] - @final - def set_output_feature_names(self, input_feature_names: List[str]) -> None: - """Set output features names as a member variable. - - Args: - input_feature_names: List of column names of the input to the - node definition. - """ - self._output_feature_names = self._define_output_feature_names( - input_feature_names - )
- - - @abstractmethod - def _define_output_feature_names( - self, input_feature_names: List[str] - ) -> List[str]: - """Construct names of output columns. - - Args: - input_feature_names: List of column names for the input data. - - Returns: - A list of column names for each column in - the node definition output. - """ - - @abstractmethod - def _construct_nodes(self, x: torch.tensor) -> Tuple[Data, List[str]]: - """Construct nodes from raw node features ´x´. - - Args: - x: standardized node features with shape ´[num_pulses, d]´, - where ´d´ is the number of node features. - feature_names: List of names for reach column in `x`. Identical - order of appearance. Length `d`. - - Returns: - graph: graph without edges. - new_node_features: A list of node features names. - """
- - - -
-[docs] -class NodesAsPulses(NodeDefinition): - """Represent each measured pulse of Cherenkov Radiation as a node.""" - - def _define_output_feature_names( - self, input_feature_names: List[str] - ) -> List[str]: - return input_feature_names - - def _construct_nodes(self, x: torch.Tensor) -> Tuple[Data, List[str]]: - return Data(x=x)
- - - -
-[docs] -class PercentileClusters(NodeDefinition): - """Represent nodes as clusters with percentile summary node features. - - If `cluster_on` is set to the xyz coordinates of DOMs - e.g. `cluster_on = ['dom_x', 'dom_y', 'dom_z']`, each node will be a - unique DOM and the pulse information (charge, time) is summarized using - percentiles. - """ - - def __init__( - self, - cluster_on: List[str], - percentiles: List[int], - add_counts: bool = True, - input_feature_names: Optional[List[str]] = None, - ) -> None: - """Construct `PercentileClusters`. - - Args: - cluster_on: Names of features to create clusters from. - percentiles: List of percentiles. E.g. `[10, 50, 90]`. - add_counts: If True, number of duplicates is added to output array. - input_feature_names: (Optional) column names for input features. - """ - self._cluster_on = cluster_on - self._percentiles = percentiles - self._add_counts = add_counts - # Base class constructor - super().__init__(input_feature_names=input_feature_names) - - def _define_output_feature_names( - self, input_feature_names: List[str] - ) -> List[str]: - ( - cluster_idx, - summ_idx, - new_feature_names, - ) = self._get_indices_and_feature_names( - input_feature_names, self._add_counts - ) - self._cluster_indices = cluster_idx - self._summarization_indices = summ_idx - return new_feature_names - - def _get_indices_and_feature_names( - self, - feature_names: List[str], - add_counts: bool, - ) -> Tuple[List[int], List[int], List[str]]: - cluster_idx, summ_idx, summ_names = identify_indices( - feature_names, self._cluster_on - ) - new_feature_names = deepcopy(self._cluster_on) - for feature in summ_names: - for pct in self._percentiles: - new_feature_names.append(f"{feature}_pct{pct}") - if add_counts: - # add "counts" as the last feature - new_feature_names.append("counts") - return cluster_idx, summ_idx, new_feature_names - - def _construct_nodes(self, x: torch.Tensor) -> Data: - # Cast to Numpy - x = x.numpy() - # Construct clusters with percentile-summarized features - if hasattr(self, "_summarization_indices"): - array = cluster_summarize_with_percentiles( - x=x, - summarization_indices=self._summarization_indices, - cluster_indices=self._cluster_indices, - percentiles=self._percentiles, - add_counts=self._add_counts, - ) - else: - self.error( - f"""{self.__class__.__name__} was not instatiated with - `input_feature_names` and has not been set later. - Please instantiate this class with `input_feature_names` - if you're using it outside `GraphDefinition`.""" - ) # noqa - raise AttributeError - - return Data(x=torch.tensor(array))
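A configuration sketch for `PercentileClusters` (the feature names are IceCube-style and assumed):

from graphnet.models.graphs.nodes import PercentileClusters

node_definition = PercentileClusters(
    cluster_on=["dom_x", "dom_y", "dom_z"],
    percentiles=[10, 50, 90],
    input_feature_names=["dom_x", "dom_y", "dom_z", "dom_time", "charge"],
)
# Output columns: dom_x, dom_y, dom_z,
# dom_time_pct10/50/90, charge_pct10/50/90, and "counts".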
-
\ No newline at end of file
diff --git a/_modules/graphnet/models/graphs/utils.html b/_modules/graphnet/models/graphs/utils.html
deleted file mode 100644
index 7081b3238..000000000
--- a/_modules/graphnet/models/graphs/utils.html
+++ /dev/null
@@ -1,532 +0,0 @@

Source code for graphnet.models.graphs.utils

-"""Utility functions for construction of graphs."""
-
-from typing import List, Tuple
-import numpy as np
-
-
-
-[docs] -def lex_sort(x: np.array, cluster_columns: List[int]) -> np.ndarray: - """Sort numpy arrays according to columns on ´cluster_columns´. - - Note that `x` is sorted along the dimensions in `cluster_columns` - backwards. I.e. `cluster_columns = [0,1,2]` - means `x` is sorted along `[2,1,0]`. - - Args: - x: array to be sorted. - cluster_columns: Columns of `x` to be sorted along. - - Returns: - A sorted version of `x`. - """ - tmp_list = [] - for cluster_column in cluster_columns: - tmp_list.append(x[:, cluster_column]) - return x[np.lexsort(tuple(tmp_list)), :]
- - - -
-[docs] -def gather_cluster_sequence( - x: np.ndarray, feature_idx: int, cluster_columns: List[int] -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """Turn `x` into rows of clusters with sequences along columns. - - Sequences along columns are added which correspond to - gathered sequences of the feature in `x` specified by column index - `feature_idx` associated with each column. Sequences are padded with NaN to - be of same length. Dimension of clustered array is `[n_clusters, l + - len(cluster_columns)]`,where l is the largest sequence length. - - **Example**: - Suppose `x` represents a neutrino event and we have chosen to cluster on - the PMT positions and that `feature_idx` correspond to pulse time. - - The resulting array will have dimensions `[n_pmts, m + 3]` where `m` is the - maximum number of same-pmt pulses found in `x`, and `+3`for the three - spatial directions defining each cluster. - - Args: - x: Array for clustering - feature_idx: Index of the feature in `x` to - be gathered for each cluster. - cluster_columns: Index in `x` from which to build clusters. - - Returns: - array: Array with dimensions `[n_clusters, l + len(cluster_columns)]` - column_offset: Indices of the columns in `array` that defines clusters. - """ - # sort pulses according to cluster columns - x = lex_sort(x=x, cluster_columns=cluster_columns) - - # Calculate clusters and counts - unique_sensors, counts = np.unique( - x[:, cluster_columns], return_counts=True, axis=0 - ) - # sort DOMs and pulse-counts - sort_this = np.concatenate([unique_sensors, counts.reshape(-1, 1)], axis=1) - sort_this = lex_sort(x=sort_this, cluster_columns=cluster_columns) - unique_sensors = sort_this[:, 0 : unique_sensors.shape[1]] - counts = sort_this[:, unique_sensors.shape[1] :].flatten().astype(int) - - # Pad unique sensor columns with NaN's up until the maximum number of - # Same pmt-pulses. Each of padded columns represents a pulse. - pad = np.empty((unique_sensors.shape[0], max(counts))) - pad[:] = np.nan - array = np.concatenate([unique_sensors, pad], axis=1) - column_offset = unique_sensors.shape[1] - - # Construct indices for loop - cumsum = np.zeros(len(np.cumsum(counts)) + 1) - cumsum[0] = 0 - cumsum[1:] = np.cumsum(counts) - cumsum = cumsum.astype(int) - - # Insert pulse attribute in place of NaN. - for k in range(len(counts)): - array[k, column_offset : (column_offset + counts[k])] = x[ - cumsum[k] : cumsum[k + 1], feature_idx - ] - return array, column_offset, counts
- - - -
-[docs] -def identify_indices( - feature_names: List[str], cluster_on: List[str] -) -> Tuple[List[int], List[int], List[str]]: - """Identify indices for clustering and summarization.""" - features_for_summarization = [] - for feature in feature_names: - if feature not in cluster_on: - features_for_summarization.append(feature) - cluster_indices = [feature_names.index(column) for column in cluster_on] - summarization_indices = [ - feature_names.index(column) for column in features_for_summarization - ] - return cluster_indices, summarization_indices, features_for_summarization
- - - -
-[docs] -def cluster_summarize_with_percentiles( - x: np.ndarray, - summarization_indices: List[int], - cluster_indices: List[int], - percentiles: List[int], - add_counts: bool, -) -> np.ndarray: - """Turn `x` into clusters with percentile summary. - - From variables specified by column indices `cluster_indices`, `x` is turned - into clusters. Information in columns of `x` specified by indices - `summarization_indices` with each cluster is summarized using percentiles. - It is assumed `x` represents a single event. - - **Example use-case**: - Suppose `x` contains raw pulses from a neutrino event where some DOMs have - multiple measurements of Cherenkov radiation. If `cluster_indices` is set - to the columns corresponding to the xyz-position of the DOMs, and the - features specified in `summarization_indices` correspond to time, charge, - then each row in the returned array will correspond to a DOM, - and the time and charge for each DOM will be summarized by percentiles. - Returned output array has dimensions - `[n_clusters, len(percentiles)*len(summarization_indices) + len(cluster_indices)]` - - Args: - x: Array to be clustered - summarization_indices: List of column indices that defines features - that will be summarized with percentiles. - cluster_indices: List of column indices on which the clusters - are constructed. - percentiles: percentiles used to summarize `x`. E.g. [10,50,90]. - - Returns: - Percentile-summarized array - """ - pct_dict = {} - for feature_idx in summarization_indices: - summarized_array, column_offset, counts = gather_cluster_sequence( - x, feature_idx, cluster_indices - ) - pct_dict[feature_idx] = np.nanpercentile( - summarized_array[:, column_offset:], percentiles, axis=1 - ).T - - for i, key in enumerate(pct_dict.keys()): - if i == 0: - array = summarized_array[:, 0:column_offset] - - array = np.concatenate([array, pct_dict[key]], axis=1) - - if add_counts: - array = np.concatenate( - [array, np.log10(counts).reshape(-1, 1)], axis=1 - ) - - return array
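A small worked example of the clustering above: two pulses sharing an (x, y, z) position collapse into one cluster, and the time column is summarized by its median (numbers made up):

import numpy as np

from graphnet.models.graphs.utils import cluster_summarize_with_percentiles

x = np.array(
    [[0.0, 0.0, 1.0, 10.0],
     [0.0, 0.0, 1.0, 20.0],
     [0.0, 1.0, 1.0, 15.0]]  # columns: x, y, z, t
)
out = cluster_summarize_with_percentiles(
    x,
    summarization_indices=[3],
    cluster_indices=[0, 1, 2],
    percentiles=[50],
    add_counts=False,
)
# out.shape == (2, 4): xyz of each cluster plus the median time.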
-
\ No newline at end of file
diff --git a/_modules/graphnet/models/standard_model.html b/_modules/graphnet/models/standard_model.html
deleted file mode 100644
index 8d219a1e4..000000000
--- a/_modules/graphnet/models/standard_model.html
+++ /dev/null
@@ -1,912 +0,0 @@

Source code for graphnet.models.standard_model

-"""Standard model class(es)."""
-from collections import OrderedDict
-from typing import Any, Dict, List, Optional, Union, Type
-
-import numpy as np
-import torch
-from pytorch_lightning import Callback, Trainer
-from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
-from torch import Tensor
-from torch.nn import ModuleList
-from torch.optim import Adam
-from torch.utils.data import DataLoader, SequentialSampler
-from torch_geometric.data import Data
-import pandas as pd
-from pytorch_lightning.loggers import Logger as LightningLogger
-
-from graphnet.training.callbacks import ProgressBar
-from graphnet.models.graphs import GraphDefinition
-from graphnet.models.gnn.gnn import GNN
-from graphnet.models.model import Model
-from graphnet.models.task import StandardLearnedTask
-
-
-
-[docs] -class StandardModel(Model): - """Main class for standard models in graphnet. - - This class chains together the different elements of a complete GNN- based - model (detector read-in, GNN backbone, and task-specific read-outs). - """ - - def __init__( - self, - *, - graph_definition: GraphDefinition, - backbone: GNN = None, - gnn: Optional[GNN] = None, - tasks: Union[StandardLearnedTask, List[StandardLearnedTask]], - optimizer_class: Type[torch.optim.Optimizer] = Adam, - optimizer_kwargs: Optional[Dict] = None, - scheduler_class: Optional[type] = None, - scheduler_kwargs: Optional[Dict] = None, - scheduler_config: Optional[Dict] = None, - ) -> None: - """Construct `StandardModel`.""" - # Base class constructor - super().__init__(name=__name__, class_name=self.__class__.__name__) - - # Check(s) - if isinstance(tasks, StandardLearnedTask): - tasks = [tasks] - assert isinstance(tasks, (list, tuple)) - assert all(isinstance(task, StandardLearnedTask) for task in tasks) - assert isinstance(graph_definition, GraphDefinition) - - # deprecation warnings - if (backbone is None) & (gnn is not None): - backbone = gnn - # Code continues after warning - self.warning( - """DeprecationWarning: Argument `gnn` will be deprecated in GraphNeT 2.0. Please use `backbone` instead.""" - ) - elif (backbone is None) & (gnn is None): - # Code stops - raise TypeError( - "__init__() missing 1 required keyword-only argument: 'backbone'" - ) - assert isinstance(backbone, GNN) - - # Member variable(s) - self._graph_definition = graph_definition - self.backbone = backbone - self._tasks = ModuleList(tasks) - self._optimizer_class = optimizer_class - self._optimizer_kwargs = optimizer_kwargs or dict() - self._scheduler_class = scheduler_class - self._scheduler_kwargs = scheduler_kwargs or dict() - self._scheduler_config = scheduler_config or dict() - - # set dtype of GNN from graph_definition - self.backbone.type(self._graph_definition._dtype) - - @staticmethod - def _construct_trainer( - max_epochs: int = 10, - gpus: Optional[Union[List[int], int]] = None, - callbacks: Optional[List[Callback]] = None, - logger: Optional[LightningLogger] = None, - log_every_n_steps: int = 1, - gradient_clip_val: Optional[float] = None, - distribution_strategy: Optional[str] = "ddp", - **trainer_kwargs: Any, - ) -> Trainer: - if gpus: - accelerator = "gpu" - devices = gpus - else: - accelerator = "cpu" - devices = 1 - - trainer = Trainer( - accelerator=accelerator, - devices=devices, - max_epochs=max_epochs, - callbacks=callbacks, - log_every_n_steps=log_every_n_steps, - logger=logger, - gradient_clip_val=gradient_clip_val, - strategy=distribution_strategy, - **trainer_kwargs, - ) - - return trainer - -
-[docs] - def fit( - self, - train_dataloader: DataLoader, - val_dataloader: Optional[DataLoader] = None, - *, - max_epochs: int = 10, - early_stopping_patience: int = 5, - gpus: Optional[Union[List[int], int]] = None, - callbacks: Optional[List[Callback]] = None, - ckpt_path: Optional[str] = None, - logger: Optional[LightningLogger] = None, - log_every_n_steps: int = 1, - gradient_clip_val: Optional[float] = None, - distribution_strategy: Optional[str] = "ddp", - **trainer_kwargs: Any, - ) -> None: - """Fit `StandardModel` using `pytorch_lightning.Trainer`.""" - # Checks - if callbacks is None: - # We create the bare-minimum callbacks for you. - callbacks = self._create_default_callbacks( - val_dataloader=val_dataloader, - early_stopping_patience=early_stopping_patience, - ) - self.debug("No Callbacks specified. Default callbacks added.") - else: - # You are on your own! - self.debug("Initializing training with user-provided callbacks.") - pass - self._print_callbacks(callbacks) - has_early_stopping = self._contains_callback(callbacks, EarlyStopping) - has_model_checkpoint = self._contains_callback( - callbacks, ModelCheckpoint - ) - - if (has_early_stopping) & (has_model_checkpoint is False): - self.warning( - """No ModelCheckpoint found in callbacks. Best-fit model will not automatically be loaded after training!""" - ) - - self.train(mode=True) - trainer = self._construct_trainer( - max_epochs=max_epochs, - gpus=gpus, - callbacks=callbacks, - logger=logger, - log_every_n_steps=log_every_n_steps, - gradient_clip_val=gradient_clip_val, - distribution_strategy=distribution_strategy, - **trainer_kwargs, - ) - - try: - trainer.fit( - self, train_dataloader, val_dataloader, ckpt_path=ckpt_path - ) - except KeyboardInterrupt: - self.warning("[ctrl+c] Exiting gracefully.") - pass - - # Load weights from best-fit model after training if possible - if has_early_stopping & has_model_checkpoint: - for callback in callbacks: - if isinstance(callback, ModelCheckpoint): - checkpoint_callback = callback - self.load_state_dict( - torch.load(checkpoint_callback.best_model_path)["state_dict"] - ) - self.info("Best-fit weights from EarlyStopping loaded.")
- - - def _print_callbacks(self, callbacks: List[Callback]) -> None: - callback_names = [] - for cbck in callbacks: - callback_names.append(cbck.__class__.__name__) - self.info( - f"Training initiated with callbacks: {', '.join(callback_names)}" - ) - - def _contains_callback( - self, callbacks: List[Callback], callback: Callback - ) -> bool: - """Check if `callback` is in `callbacks`.""" - for cbck in callbacks: - if isinstance(cbck, callback): - return True - return False - - @property - def target_labels(self) -> List[str]: - """Return target label.""" - return [label for task in self._tasks for label in task._target_labels] - - @property - def prediction_labels(self) -> List[str]: - """Return prediction labels.""" - return [ - label for task in self._tasks for label in task._prediction_labels - ] - -
-[docs] - def configure_optimizers(self) -> Dict[str, Any]: - """Configure the model's optimizer(s).""" - optimizer = self._optimizer_class( - self.parameters(), **self._optimizer_kwargs - ) - config = { - "optimizer": optimizer, - } - if self._scheduler_class is not None: - scheduler = self._scheduler_class( - optimizer, **self._scheduler_kwargs - ) - config.update( - { - "lr_scheduler": { - "scheduler": scheduler, - **self._scheduler_config, - }, - } - ) - return config
- - -
-[docs] - def forward( - self, data: Union[Data, List[Data]] - ) -> List[Union[Tensor, Data]]: - """Forward pass, chaining model components.""" - if isinstance(data, Data): - data = [data] - x_list = [] - for d in data: - x = self.backbone(d) - x_list.append(x) - x = torch.cat(x_list, dim=0) - - preds = [task(x) for task in self._tasks] - return preds
- - -
-[docs] - def shared_step(self, batch: List[Data], batch_idx: int) -> Tensor: - """Perform shared step. - - Applies the forward pass and the following loss calculation, shared - between the training and validation step. - """ - preds = self(batch) - loss = self.compute_loss(preds, batch) - return loss
- - -
-[docs] - def training_step( - self, train_batch: Union[Data, List[Data]], batch_idx: int - ) -> Tensor: - """Perform training step.""" - if isinstance(train_batch, Data): - train_batch = [train_batch] - loss = self.shared_step(train_batch, batch_idx) - self.log( - "train_loss", - loss, - batch_size=self._get_batch_size(train_batch), - prog_bar=True, - on_epoch=True, - on_step=False, - sync_dist=True, - ) - return loss
- - -
-[docs] - def validation_step( - self, val_batch: Union[Data, List[Data]], batch_idx: int - ) -> Tensor: - """Perform validation step.""" - if isinstance(val_batch, Data): - val_batch = [val_batch] - loss = self.shared_step(val_batch, batch_idx) - self.log( - "val_loss", - loss, - batch_size=self._get_batch_size(val_batch), - prog_bar=True, - on_epoch=True, - on_step=False, - sync_dist=True, - ) - return loss
- - -
-[docs] - def compute_loss( - self, preds: Tensor, data: List[Data], verbose: bool = False - ) -> Tensor: - """Compute and sum losses across tasks.""" - data_merged = {} - target_labels_merged = list(set(self.target_labels)) - for label in target_labels_merged: - data_merged[label] = torch.cat([d[label] for d in data], dim=0) - for task in self._tasks: - if task._loss_weight is not None: - data_merged[task._loss_weight] = torch.cat( - [d[task._loss_weight] for d in data], dim=0 - ) - - losses = [ - task.compute_loss(pred, data_merged) - for task, pred in zip(self._tasks, preds) - ] - if verbose: - self.info(f"{losses}") - assert all( - loss.dim() == 0 for loss in losses - ), "Please reduce loss for each task separately" - return torch.sum(torch.stack(losses))
- - -
-[docs] - def inference(self) -> None: - """Activate inference mode.""" - for task in self._tasks: - task.inference()
- - -
-[docs] - def train(self, mode: bool = True) -> "Model": - """Deactivate inference mode.""" - super().train(mode) - if mode: - for task in self._tasks: - task.train_eval() - return self
- - -
-[docs] - def predict( - self, - dataloader: DataLoader, - gpus: Optional[Union[List[int], int]] = None, - distribution_strategy: Optional[str] = "auto", - ) -> List[Tensor]: - """Return predictions for `dataloader`.""" - self.inference() - self.train(mode=False) - - callbacks = self._create_default_callbacks( - val_dataloader=None, - ) - - inference_trainer = self._construct_trainer( - gpus=gpus, - distribution_strategy=distribution_strategy, - callbacks=callbacks, - ) - - predictions_list = inference_trainer.predict(self, dataloader) - assert len(predictions_list), "Got no predictions" - - nb_outputs = len(predictions_list[0]) - predictions: List[Tensor] = [ - torch.cat([preds[ix] for preds in predictions_list], dim=0) - for ix in range(nb_outputs) - ] - return predictions
- - -
-[docs] - def predict_as_dataframe( - self, - dataloader: DataLoader, - prediction_columns: Optional[List[str]] = None, - *, - additional_attributes: Optional[List[str]] = None, - gpus: Optional[Union[List[int], int]] = None, - distribution_strategy: Optional[str] = "auto", - ) -> pd.DataFrame: - """Return predictions for `dataloader` as a DataFrame. - - Include `additional_attributes` as additional columns in the output - DataFrame. - """ - if prediction_columns is None: - prediction_columns = self.prediction_labels - - if additional_attributes is None: - additional_attributes = [] - assert isinstance(additional_attributes, list) - - if ( - not isinstance(dataloader.sampler, SequentialSampler) - and additional_attributes - ): - print(dataloader.sampler) - raise UserWarning( - "DataLoader has a `sampler` that is not `SequentialSampler`, " - "indicating that shuffling is enabled. Using " - "`predict_as_dataframe` with `additional_attributes` assumes " - "that the sequence of batches in `dataloader` are " - "deterministic. Either call this method a `dataloader` which " - "doesn't resample batches; or do not request " - "`additional_attributes`." - ) - self.info(f"Column names for predictions are: \n {prediction_columns}") - predictions_torch = self.predict( - dataloader=dataloader, - gpus=gpus, - distribution_strategy=distribution_strategy, - ) - predictions = ( - torch.cat(predictions_torch, dim=1).detach().cpu().numpy() - ) - assert len(prediction_columns) == predictions.shape[1], ( - f"Number of provided column names ({len(prediction_columns)}) and " - f"number of output columns ({predictions.shape[1]}) don't match." - ) - - # Get additional attributes - attributes: Dict[str, List[np.ndarray]] = OrderedDict( - [(attr, []) for attr in additional_attributes] - ) - for batch in dataloader: - for attr in attributes: - attribute = batch[attr] - if isinstance(attribute, torch.Tensor): - attribute = attribute.detach().cpu().numpy() - - # Check if node level predictions - # If true, additional attributes are repeated - # to make dimensions fit - if len(predictions) != len(dataloader.dataset): - if len(attribute) < np.sum( - batch.n_pulses.detach().cpu().numpy() - ): - attribute = np.repeat( - attribute, batch.n_pulses.detach().cpu().numpy() - ) - try: - assert len(attribute) == len(batch.x) - except AssertionError: - self.warning_once( - "Could not automatically adjust length" - f"of additional attribute {attr} to match length of" - f"predictions. Make sure {attr} is a graph-level or" - "node-level attribute. Attribute skipped." - ) - pass - attributes[attr].extend(attribute) - - data = np.concatenate( - [predictions] - + [ - np.asarray(values)[:, np.newaxis] - for values in attributes.values() - ], - axis=1, - ) - - results = pd.DataFrame( - data, columns=prediction_columns + additional_attributes - ) - return results
- - - def _create_default_callbacks( - self, - val_dataloader: DataLoader, - early_stopping_patience: Optional[int] = None, - ) -> List: - """Create default callbacks. - - Used in cases where no callbacks are specified by the user in .fit - """ - callbacks = [ProgressBar()] - if val_dataloader is not None: - assert early_stopping_patience is not None - # Add Early Stopping - callbacks.append( - EarlyStopping( - monitor="val_loss", - patience=early_stopping_patience, - ) - ) - # Add Model Check Point - callbacks.append( - ModelCheckpoint( - save_top_k=1, - monitor="val_loss", - mode="min", - filename=f"{self.backbone.__class__.__name__}" - + "-{epoch}-{val_loss:.2f}-{train_loss:.2f}", - ) - ) - self.info( - f"EarlyStopping has been added with a patience of {early_stopping_patience}." - ) - return callbacks - - def _add_early_stopping( - self, val_dataloader: DataLoader, callbacks: List - ) -> List: - if val_dataloader is None: - return callbacks - has_early_stopping = False - assert isinstance(callbacks, list) - for callback in callbacks: - if isinstance(callback, EarlyStopping): - has_early_stopping = True - - if not has_early_stopping: - callbacks.append( - EarlyStopping( - monitor="val_loss", - patience=5, - ) - ) - self.warning_once( - "Got validation dataloader but no EarlyStopping callback. An " - "EarlyStopping callback has been added automatically with " - "patience=5 and monitor = 'val_loss'." - ) - return callbacks
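Pulling the pieces together, a training sketch with `StandardModel`. The detector, backbone configuration, and loss are assumptions, and `train_loader`/`val_loader` are assumed, pre-built `DataLoader`s:

from graphnet.models import StandardModel
from graphnet.models.detector.icecube import IceCube86  # assumed detector
from graphnet.models.gnn import DynEdge
from graphnet.models.graphs import KNNGraph
from graphnet.models.task.reconstruction import EnergyReconstruction
from graphnet.training.loss_functions import LogCoshLoss

graph_definition = KNNGraph(detector=IceCube86())
backbone = DynEdge(
    nb_inputs=graph_definition.nb_outputs,
    global_pooling_schemes=["min", "max", "mean"],
)
task = EnergyReconstruction(
    hidden_size=backbone.nb_outputs,
    target_labels="energy",
    loss_function=LogCoshLoss(),
)
model = StandardModel(
    graph_definition=graph_definition, backbone=backbone, tasks=[task]
)
# With dataloaders in hand:
# model.fit(train_loader, val_loader, max_epochs=5, gpus=[0])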
-
\ No newline at end of file
diff --git a/_modules/graphnet/models/task/classification.html b/_modules/graphnet/models/task/classification.html
deleted file mode 100644
index cc251f57b..000000000
--- a/_modules/graphnet/models/task/classification.html
+++ /dev/null
@@ -1,411 +0,0 @@

Source code for graphnet.models.task.classification

-"""Classification-specific `Model` class(es)."""
-
-from typing import Any
-
-import torch
-from torch import Tensor
-
-from graphnet.models.task import IdentityTask, StandardLearnedTask
-
-
-
-[docs] -class MulticlassClassificationTask(IdentityTask): - """General task for classifying any number of classes. - - Requires the same number of input features as the number of classes being - predicted. Returns the untransformed latent features, which are interpreted - as the logits for each class being classified. - """
- - - -
-
[docs]
-class BinaryClassificationTask(StandardLearnedTask):
-    """Performs binary classification."""
-
-    # Requires one feature: the logit for being the signal class.
-    nb_inputs = 1
-    default_target_labels = ["target"]
-    default_prediction_labels = ["target_pred"]
-
-    def _forward(self, x: Tensor) -> Tensor:
-        # Transform logit into probability of being the signal class
-        return torch.sigmoid(x)
- - - -
-
[docs]
-class BinaryClassificationTaskLogits(StandardLearnedTask):
-    """Performs binary classification from logits."""
-
-    # Requires one feature: the logit for being the signal class.
-    nb_inputs = 1
-    default_target_labels = ["target"]
-    default_prediction_labels = ["target_pred"]
-
-    def _forward(self, x: Tensor) -> Tensor:
-        return x
- -
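A configuration sketch for the binary task (the hidden size and loss choice are assumptions; `BinaryCrossEntropyLoss` is taken from graphnet.training.loss_functions):

from graphnet.models.task.classification import BinaryClassificationTask
from graphnet.training.loss_functions import BinaryCrossEntropyLoss

task = BinaryClassificationTask(
    hidden_size=128,  # latent size of the backbone, assumed
    target_labels="target",
    loss_function=BinaryCrossEntropyLoss(),
)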
-
\ No newline at end of file
diff --git a/_modules/graphnet/models/task/reconstruction.html b/_modules/graphnet/models/task/reconstruction.html
deleted file mode 100644
index 381966505..000000000
--- a/_modules/graphnet/models/task/reconstruction.html
+++ /dev/null
@@ -1,632 +0,0 @@

Source code for graphnet.models.task.reconstruction

-"""Reconstruction-specific `Model` class(es)."""
-
-import numpy as np
-import torch
-from torch import Tensor
-
-from graphnet.models.task import StandardLearnedTask
-from graphnet.utilities.maths import eps_like
-
-
-
class AzimuthReconstructionWithKappa(StandardLearnedTask):
    """Reconstructs azimuthal angle and associated kappa (1/var)."""

    # Requires two features: untransformed points in (x,y)-space.
    default_target_labels = ["azimuth"]
    default_prediction_labels = ["azimuth_pred", "azimuth_kappa"]
    nb_inputs = 2

    def _forward(self, x: Tensor) -> Tensor:
        # Transform outputs to angle and prepare prediction
        kappa = torch.linalg.vector_norm(x, dim=1) + eps_like(x)
        angle = torch.atan2(x[:, 1], x[:, 0])
        # atan2 returns angles in [-pi, pi]; map them to [0, 2*pi).
        angle = torch.where(angle < 0, angle + 2 * np.pi, angle)
        return torch.stack((angle, kappa), dim=1)


class AzimuthReconstruction(AzimuthReconstructionWithKappa):
    """Reconstructs azimuthal angle."""

    # Requires two features: untransformed points in (x,y)-space.
    default_target_labels = ["azimuth"]
    default_prediction_labels = ["azimuth_pred"]
    nb_inputs = 2

    def _forward(self, x: Tensor) -> Tensor:
        # Transform outputs to angle and prepare prediction
        res = super()._forward(x)
        angle = res[:, 0].unsqueeze(1)
        kappa = res[:, 1]
        sigma = torch.sqrt(1.0 / kappa)
        beta = 1e-3
        kl_loss = torch.mean(sigma**2 - torch.log(sigma) - 1)
        self._regularisation_loss += beta * kl_loss
        return angle


class DirectionReconstructionWithKappa(StandardLearnedTask):
    """Reconstructs direction with kappa from the 3D-vMF distribution."""

    # Requires three features: untransformed points in (x,y,z)-space.
    # "direction" contains dir_x, dir_y and dir_z; see
    # https://github.com/graphnet-team/graphnet/blob/95309556cfd46a4046bc4bd7609888aab649e295/src/graphnet/training/labels.py#L29
    default_target_labels = ["direction"]
    default_prediction_labels = [
        "dir_x_pred",
        "dir_y_pred",
        "dir_z_pred",
        "direction_kappa",
    ]
    nb_inputs = 3

    def _forward(self, x: Tensor) -> Tensor:
        # Normalise the 3D vector to unit length; its norm serves as the
        # kappa estimate of the von Mises-Fisher distribution.
        kappa = torch.linalg.vector_norm(x, dim=1) + eps_like(x)
        vec_x = x[:, 0] / kappa
        vec_y = x[:, 1] / kappa
        vec_z = x[:, 2] / kappa
        return torch.stack((vec_x, vec_y, vec_z, kappa), dim=1)
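A quick standalone sanity check of the transform above (a sketch, not part of the source; `eps_like` is omitted for brevity): the first three outputs form a unit vector and the fourth is the norm of the raw latent vector.

import torch

x = torch.randn(4, 3)  # raw latent output for a batch of 4 events
kappa = torch.linalg.vector_norm(x, dim=1)
direction = x / kappa.unsqueeze(1)

assert torch.allclose(
    torch.linalg.vector_norm(direction, dim=1), torch.ones(4), atol=1e-5
)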
class ZenithReconstruction(StandardLearnedTask):
    """Reconstructs zenith angle."""

    # Requires one feature: the zenith angle itself.
    default_target_labels = ["zenith"]
    default_prediction_labels = ["zenith_pred"]
    nb_inputs = 1

    def _forward(self, x: Tensor) -> Tensor:
        # Squash the output into the valid zenith range [0, pi].
        return torch.sigmoid(x[:, :1]) * np.pi


class ZenithReconstructionWithKappa(ZenithReconstruction):
    """Reconstructs zenith angle and associated kappa (1/var)."""

    # Requires one feature in addition to `ZenithReconstruction`:
    # kappa (uncertainty; 1/variance).
    default_target_labels = ["zenith"]
    default_prediction_labels = ["zenith_pred", "zenith_kappa"]
    nb_inputs = 2

    def _forward(self, x: Tensor) -> Tensor:
        # Transform outputs to angle and prepare prediction
        angle = super()._forward(x[:, :1]).squeeze(1)
        kappa = torch.abs(x[:, 1]) + eps_like(x)
        return torch.stack((angle, kappa), dim=1)


class EnergyReconstruction(StandardLearnedTask):
    """Reconstructs energy using a numerically stable method."""

    # Requires one feature: untransformed energy.
    default_target_labels = ["energy"]
    default_prediction_labels = ["energy_pred"]
    nb_inputs = 1

    def _forward(self, x: Tensor) -> Tensor:
        # Transform to the positive energy domain, avoiding `-inf` in
        # `log10` and thereby preventing overflow and underflow errors.
        return torch.nn.functional.softplus(x, beta=0.05) + eps_like(x)


class EnergyReconstructionWithPower(StandardLearnedTask):
    """Reconstructs energy."""

    # Requires one feature: untransformed energy.
    default_target_labels = ["energy"]
    default_prediction_labels = ["energy_pred"]
    nb_inputs = 1

    def _forward(self, x: Tensor) -> Tensor:
        # Transform energy
        return torch.pow(10, x[:, 0] + 1.0).unsqueeze(1)


class EnergyTCReconstruction(StandardLearnedTask):
    """Reconstructs track and cascade energies using a stable method."""

    # Requires two features: untransformed energies for track and cascade.
    default_target_labels = ["energy_track", "energy_cascade"]
    default_prediction_labels = ["energy_track_pred", "energy_cascade_pred"]
    nb_inputs = 2

    def _forward(self, x: Tensor) -> Tensor:
        # Transform to the positive energy domain, avoiding `-inf` in
        # `log10` and thereby preventing overflow and underflow errors.
        x[:, 0] = torch.nn.functional.softplus(
            x[:, 0].clone(), beta=0.05
        ) + eps_like(x[:, 0].clone())
        x[:, 1] = torch.nn.functional.softplus(
            x[:, 1].clone(), beta=0.05
        ) + eps_like(x[:, 1].clone())
        return x


class EnergyReconstructionWithUncertainty(EnergyReconstruction):
    """Reconstructs energy and associated uncertainty (log(var))."""

    # Requires one feature in addition to `EnergyReconstruction`:
    # log-variance (uncertainty).
    default_target_labels = ["energy"]
    default_prediction_labels = ["energy_pred", "energy_sigma"]
    nb_inputs = 2

    def _forward(self, x: Tensor) -> Tensor:
        # Transform energy
        energy = super()._forward(x[:, :1]).squeeze(1)
        log_var = x[:, 1]
        pred = torch.stack((energy, log_var), dim=1)
        return pred
class VertexReconstruction(StandardLearnedTask):
    """Reconstructs vertex position and time."""

    # Requires four features: x, y, z, and t.
    default_target_labels = ["vertex"]
    default_prediction_labels = [
        "position_x_pred",
        "position_y_pred",
        "position_z_pred",
        "interaction_time_pred",
    ]
    nb_inputs = 4

    def _forward(self, x: Tensor) -> Tensor:
        # Scale xyz to roughly the right order of magnitude; leave time
        # as-is.
        x[:, 0] = x[:, 0] * 1e2
        x[:, 1] = x[:, 1] * 1e2
        x[:, 2] = x[:, 2] * 1e2
        return x


class PositionReconstruction(StandardLearnedTask):
    """Reconstructs vertex position."""

    # Requires three features: x, y, and z.
    default_target_labels = ["position"]
    default_prediction_labels = [
        "position_x_pred",
        "position_y_pred",
        "position_z_pred",
    ]
    nb_inputs = 3

    def _forward(self, x: Tensor) -> Tensor:
        # Scale to roughly the right order of magnitude
        x[:, 0] = x[:, 0] * 1e2
        x[:, 1] = x[:, 1] * 1e2
        x[:, 2] = x[:, 2] * 1e2
        return x


class TimeReconstruction(StandardLearnedTask):
    """Reconstructs time."""

    # Requires one feature: time.
    default_target_labels = ["interaction_time"]
    default_prediction_labels = ["interaction_time_pred"]
    nb_inputs = 1

    def _forward(self, x: Tensor) -> Tensor:
        # Leave as-is.
        return x


class InelasticityReconstruction(StandardLearnedTask):
    """Reconstructs interaction inelasticity.

    That is, 1 - (track energy / hadronic energy).
    """

    # Requires one feature: the inelasticity itself.
    default_target_labels = ["inelasticity"]
    default_prediction_labels = ["inelasticity_pred"]
    nb_inputs = 1

    def _forward(self, x: Tensor) -> Tensor:
        # Transform the output to the unit range [0, 1].
        return torch.sigmoid(x)
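Constructing one of these tasks mirrors the classification example earlier. A minimal sketch, not from the source; it assumes `VonMisesFisher2DLoss` is available from graphnet.training.loss_functions:

from graphnet.models.task.reconstruction import ZenithReconstructionWithKappa
from graphnet.training.loss_functions import VonMisesFisher2DLoss

task = ZenithReconstructionWithKappa(
    hidden_size=128,          # width of the backbone's last latent layer
    target_labels="zenith",   # truth column with the zenith angle
    loss_function=VonMisesFisher2DLoss(),
)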

Source code for graphnet.models.task.task

-"""Base physics task-specific `Model` class(es)."""
-
-from abc import abstractmethod
-from typing import Any, TYPE_CHECKING, List, Tuple, Union
-from typing import Callable, Optional
-import numpy as np
-
-import torch
-from torch import Tensor
-from torch.nn import Linear
-from torch_geometric.data import Data
-
-if TYPE_CHECKING:
-    # Avoid cyclic dependency
-    from graphnet.training.loss_functions import LossFunction  # type: ignore[attr-defined]
-
-from graphnet.models import Model
-from graphnet.utilities.decorators import final
-
-
-
class Task(Model):
    """Base class for Tasks in GraphNeT."""

    @property
    @abstractmethod
    def nb_inputs(self) -> int:
        """Return number of inputs assumed by task."""

    @property
    def default_target_labels(self) -> List[str]:
        """Return default target labels."""
        return self._default_target_labels

    @property
    def default_prediction_labels(self) -> List[str]:
        """Return default prediction labels."""
        return self._default_prediction_labels

    def __init__(
        self,
        *,
        loss_function: "LossFunction",
        target_labels: Optional[Union[str, List[str]]] = None,
        prediction_labels: Optional[Union[str, List[str]]] = None,
        transform_prediction_and_target: Optional[Callable] = None,
        transform_target: Optional[Callable] = None,
        transform_inference: Optional[Callable] = None,
        transform_support: Optional[Tuple] = None,
        loss_weight: Optional[str] = None,
    ):
        """Construct `Task`.

        Args:
            loss_function: Loss function appropriate to the task.
            target_labels: Name(s) of the quantity/-ies being predicted, used
                to extract the target tensor(s) from the `Data` object in
                `.compute_loss(...)`.
            prediction_labels: The name(s) of each column that is predicted
                by the model during inference. If not given, the name will
                automatically be set to `target_label + _pred`.
            transform_prediction_and_target: Optional function to transform
                both the predicted and target tensor before passing them to
                the loss function. Useful e.g. for having the model predict
                quantities on a physical scale, but transforming this scale
                to O(1) for a numerically stable loss computation.
            transform_target: Optional function to transform only the target
                tensor before passing it, and the predicted tensor, to the
                loss function. Useful e.g. for having the model predict a
                transformed version of the target quantity, e.g. the
                log10-scaled energy, rather than the physical quantity
                itself. Used in conjunction with `transform_inference` to
                perform the inverse transform on the predicted quantity to
                recover the physical scale.
            transform_inference: Optional function to inverse-transform the
                model prediction to recover a physical scale. Used in
                conjunction with `transform_target`.
            transform_support: Optional tuple to specify minimum and maximum
                of the range of validity for the inverse transforms
                `transform_target` and `transform_inference` in case this is
                restricted. By default the invertibility of
                `transform_target` is tested on the range [-1e6, 1e6].
            loss_weight: Name of the attribute in `data` containing per-event
                loss weights.
        """
        # Base class constructor
        super().__init__()

        # Check(s)
        if target_labels is None:
            target_labels = self.default_target_labels
        if isinstance(target_labels, str):
            target_labels = [target_labels]

        if prediction_labels is None:
            prediction_labels = self.default_prediction_labels
        if isinstance(prediction_labels, str):
            prediction_labels = [prediction_labels]

        assert isinstance(target_labels, List)  # mypy
        assert isinstance(prediction_labels, List)  # mypy

        # Member variables
        self._regularisation_loss: Optional[float] = None
        self._target_labels = target_labels
        self._prediction_labels = prediction_labels
        self._loss_function = loss_function
        self._inference = False
        self._loss_weight = loss_weight

        self._transform_prediction_training: Callable[
            [Tensor], Tensor
        ] = lambda x: x
        self._transform_prediction_inference: Callable[
            [Tensor], Tensor
        ] = lambda x: x
        self._transform_target: Callable[[Tensor], Tensor] = lambda x: x
        self._validate_and_set_transforms(
            transform_prediction_and_target,
            transform_target,
            transform_inference,
            transform_support,
        )

    @final
    def _transform_prediction(
        self, prediction: Union[Tensor, Data]
    ) -> Union[Tensor, Data]:
        if self._inference:
            return self._transform_prediction_inference(prediction)
        else:
            return self._transform_prediction_training(prediction)
    @final
    def inference(self) -> None:
        """Activate inference mode."""
        self._inference = True

    @final
    def train_eval(self) -> None:
        """Deactivate inference mode."""
        self._inference = False

    @final
    def _validate_and_set_transforms(
        self,
        transform_prediction_and_target: Union[Callable, None],
        transform_target: Union[Callable, None],
        transform_inference: Union[Callable, None],
        transform_support: Union[Tuple, None],
    ) -> None:
        """Validate and set transforms.

        Assert that a valid combination of transformation arguments is
        passed and update the corresponding functions.
        """
        # Checks
        assert not (
            (transform_prediction_and_target is not None)
            and (transform_target is not None)
        ), "Please specify at most one of `transform_prediction_and_target` and `transform_target`"
        if (transform_target is not None) != (transform_inference is not None):
            self.warning(
                "Setting one of `transform_target` and "
                "`transform_inference`, but not the other."
            )

        if transform_target is not None:
            assert transform_inference is not None

            if transform_support is not None:
                assert (
                    len(transform_support) == 2
                ), "Please specify min and max for transformation support."
                x_test = torch.from_numpy(
                    np.linspace(transform_support[0], transform_support[1], 10)
                )
            else:
                x_test = np.logspace(-6, 6, 12 + 1)
                x_test = torch.from_numpy(
                    np.concatenate([-x_test[::-1], [0], x_test])
                )

            # Add a feature dimension before the inference transformation to
            # make it match the dimensions of a standard prediction. Remove
            # it again before comparison. Temporary.
            try:
                t_test = torch.unsqueeze(transform_target(x_test), -1)
                t_test = torch.squeeze(transform_inference(t_test), -1)
                valid = torch.isfinite(t_test)

                assert torch.allclose(t_test[valid], x_test[valid]), (
                    "The provided transforms for targets during training and "
                    "predictions during inference are not inverse. Please "
                    "adjust transformation functions or support."
                )
                del x_test, t_test, valid

            except IndexError:
                self.warning(
                    "transform_target and/or transform_inference rely on "
                    "indexing, which we won't validate. Please make sure "
                    "that they are mutually inverse, i.e. that\n"
                    "  x = transform_inference(transform_target(x))\n"
                    "for all x that are within your target range."
                )

        # Set transforms
        if transform_prediction_and_target is not None:
            self._transform_prediction_training = (
                transform_prediction_and_target
            )
            self._transform_target = transform_prediction_and_target
        else:
            if transform_target is not None:
                self._transform_target = transform_target
            if transform_inference is not None:
                self._transform_prediction_inference = transform_inference
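The transform validation is easiest to see with a concrete pair. A minimal sketch, not from the source, of a log10/power pair that would pass the invertibility check on positive support:

import torch

# Train on log10-scaled energy and invert back to the physical scale at
# inference time; this pair passes the invertibility check for positive
# support, e.g. transform_support=(1e0, 1e8).
def transform_target(x: torch.Tensor) -> torch.Tensor:
    return torch.log10(x)

def transform_inference(x: torch.Tensor) -> torch.Tensor:
    return torch.pow(10.0, x)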
class LearnedTask(Task):
    """Task class with a learned mapping.

    Applies a learned mapping between the last latent layer of `Model` and
    the target space. I.e., the `LearnedTask` contains learnable parameters
    that act as a prediction head.
    """

    def __init__(
        self,
        hidden_size: int,
        **task_kwargs: Any,
    ):
        """Construct `LearnedTask`.

        Args:
            hidden_size: The number of columns in the output of
                the last latent layer of `Model` using this Task.
                Available through `Model.nb_outputs`.
        """
        # Base class constructor
        super().__init__(**task_kwargs)

        # Mapping from the last hidden layer to the required input size
        self._affine = Linear(hidden_size, self.nb_inputs)

    @abstractmethod
    def _forward(  # type: ignore
        self, x: Union[Tensor, Data]
    ) -> Union[Tensor, Data]:
        """Syntax like `.forward`, for implementation in inheriting classes."""
        raise NotImplementedError

    @abstractmethod
    def compute_loss(self, pred: Union[Tensor, Data], data: Data) -> Tensor:
        """Compute loss of `pred` w.r.t. the target labels in `data`."""

    @property
    @abstractmethod
    def nb_inputs(self) -> int:
        """Return number of inputs assumed by task."""

    @final
    def forward(  # type: ignore
        self, x: Union[Tensor, Data]
    ) -> Union[Tensor, Data]:
        """Forward call for `LearnedTask`.

        The learned embedding transforms the last latent layer of `Model`
        to meet the target dimensions.
        """
        self._regularisation_loss = 0  # Reset
        x = self._affine(x)
        x = self._forward(x=x)
        return self._transform_prediction(x)
class StandardLearnedTask(LearnedTask):
    """Standard class for classification and reconstruction in GraphNeT.

    This class comes with a definition of `compute_loss` that is compatible
    with the vast majority of supervised learning tasks.
    """

    def __init__(
        self,
        hidden_size: int,
        **task_kwargs: Any,
    ):
        """Construct `StandardLearnedTask`.

        Args:
            hidden_size: The number of columns in the output of
                the last latent layer of `Model` using this Task.
                Available through `Model.nb_outputs`.
        """
        # Base class constructor
        super().__init__(hidden_size=hidden_size, **task_kwargs)

    @property
    @abstractmethod
    def nb_inputs(self) -> int:
        """Return number of inputs assumed by task."""

    @abstractmethod
    def _forward(self, x: Union[Tensor, Data]) -> Union[Tensor, Data]:
        """Syntax like `.forward`, for implementation in inheriting classes."""

    @final
    def compute_loss(self, pred: Union[Tensor, Data], data: Data) -> Tensor:
        """Compute supervised learning loss.

        Grabs truth labels in `data` and sends both `pred` and `target` to
        the loss function for evaluation. Suits most supervised learning
        `Task`s.
        """
        target = torch.stack(
            [data[label] for label in self._target_labels], dim=1
        )
        target = self._transform_target(target)
        if self._loss_weight is not None:
            weights = data[self._loss_weight]
        else:
            weights = None
        loss = (
            self._loss_function(pred, target, weights=weights)
            + self._regularisation_loss
        )
        return loss


class IdentityTask(StandardLearnedTask):
    """Identity, or trivial, task."""

    def __init__(
        self,
        nb_outputs: int,
        target_labels: Union[List[str], Any],
        *args: Any,
        **kwargs: Any,
    ):
        """Construct IdentityTask.

        A task that does not apply a learned embedding to the input. It
        returns the direct inputs from `Model`.
        """
        self._nb_inputs = nb_outputs
        self._default_target_labels = (
            target_labels
            if isinstance(target_labels, list)
            else [target_labels]
        )
        self._default_prediction_labels = [
            f"target_{i}_pred"
            for i in range(len(self._default_target_labels))
        ]

        # Base class constructor
        super().__init__(*args, **kwargs)

    @property
    def default_target_labels(self) -> List[str]:
        """Return default target labels."""
        return self._default_target_labels

    @property
    def default_prediction_labels(self) -> List[str]:
        """Return default prediction labels."""
        return self._default_prediction_labels

    @property
    def nb_inputs(self) -> int:
        """Return number of inputs assumed by task."""
        return self._nb_inputs

    def _forward(self, x: Union[Tensor, Data]) -> Tensor:  # type: ignore
        # Leave the input as-is.
        return x
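To make the subclassing pattern concrete: a new reconstruction task only needs `nb_inputs`, default labels, and `_forward`. The following is a hypothetical sketch, not part of the library; the label names are invented:

import torch
from torch import Tensor

from graphnet.models.task import StandardLearnedTask


class ScaledQuantityReconstruction(StandardLearnedTask):
    """Hypothetical task reconstructing a single non-negative quantity."""

    # One latent column is mapped to one output.
    nb_inputs = 1
    default_target_labels = ["my_quantity"]           # assumed truth column
    default_prediction_labels = ["my_quantity_pred"]

    def _forward(self, x: Tensor) -> Tensor:
        # Constrain the prediction to be non-negative.
        return torch.nn.functional.softplus(x)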
class StandardFlowTask(Task):
    """A `Task` for `NormalizingFlow`s in GraphNeT."""

    def __init__(
        self,
        target_labels: List[str],
        **task_kwargs: Any,
    ):
        """Construct `StandardFlowTask`.

        Args:
            target_labels: A list of names for the targets of this Task.
        """
        # Base class constructor
        super().__init__(target_labels=target_labels, **task_kwargs)

    def nb_inputs(self) -> int:
        """Return number of inputs assumed by task."""
        return len(self._target_labels)

    def _forward(self, x: Tensor, jacobian: Tensor) -> Tensor:  # type: ignore
        # Leave the input as-is.
        return x

    @final
    def forward(
        self, x: Union[Tensor, Data], jacobian: Optional[Tensor]
    ) -> Union[Tensor, Data]:
        """Forward pass."""
        self._regularisation_loss = 0  # Reset
        x = self._forward(x, jacobian)
        return self._transform_prediction(x)

    @final
    def compute_loss(
        self, prediction: Tensor, jacobian: Tensor, data: Data
    ) -> Tensor:
        """Compute loss for normalizing-flow tasks.

        Args:
            prediction: Transformed sample in latent distribution space.
            jacobian: The Jacobian associated with the transformation.
            data: The graph object.

        Returns:
            The loss associated with the transformation.
        """
        if self._loss_weight is not None:
            weights = data[self._loss_weight]
        else:
            weights = None
        loss = (
            self._loss_function(
                prediction=prediction,
                jacobian=jacobian,
                weights=weights,
                target=None,
            )
            + self._regularisation_loss
        )
        return loss

Source code for graphnet.models.utils

-"""Utility functions for `graphnet.models`."""
-
-from typing import List, Tuple, Union
-from torch_geometric.nn import knn_graph
-from torch_geometric.data import Batch
-import torch
-from torch import Tensor, LongTensor
-
-from torch_geometric.utils.homophily import homophily
-
-
-
def calculate_xyzt_homophily(
    x: Tensor, edge_index: LongTensor, batch: Batch
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """Calculate xyzt-homophily from a batch of graphs.

    Homophily is a graph scalar quantity that measures the likeness of
    variables in nodes. Notice that this calculator assumes a special order
    of input features in `x`: (x, y, z, t) in the first four columns.

    Returns:
        Tuple, each element with shape [batch_size, 1].
    """
    hx = homophily(edge_index, x[:, 0], batch).reshape(-1, 1)
    hy = homophily(edge_index, x[:, 1], batch).reshape(-1, 1)
    hz = homophily(edge_index, x[:, 2], batch).reshape(-1, 1)
    ht = homophily(edge_index, x[:, 3], batch).reshape(-1, 1)
    return hx, hy, hz, ht


def calculate_distance_matrix(xyz_coords: Tensor) -> Tensor:
    """Calculate the matrix of pairwise distances between pulses.

    Args:
        xyz_coords: (x,y,z)-coordinates of pulses, of shape [nb_doms, 3].

    Returns:
        Matrix of pairwise distances, of shape [nb_doms, nb_doms].
    """
    diff = xyz_coords.unsqueeze(dim=2) - xyz_coords.T.unsqueeze(dim=0)
    return torch.sqrt(torch.sum(diff**2, dim=1))
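A quick standalone check of the broadcasting trick above (a sketch, not from the source): `diff` has shape [N, 3, N], so summing the squares over `dim=1` yields the [N, N] distance matrix.

import torch

coords = torch.tensor([[0.0, 0.0, 0.0],
                       [3.0, 4.0, 0.0]])
diff = coords.unsqueeze(dim=2) - coords.T.unsqueeze(dim=0)  # shape [2, 3, 2]
dist = torch.sqrt(torch.sum(diff**2, dim=1))                # shape [2, 2]

assert torch.allclose(dist, torch.tensor([[0.0, 5.0],
                                          [5.0, 0.0]]))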
def knn_graph_batch(batch: Batch, k: List[int], columns: List[int]) -> Batch:
    """Calculate k-nearest-neighbours with individual k for each batch event.

    Args:
        batch: Batch of events.
        k: A list of k's, one per event in the batch.
        columns: The columns of `Data.x` used for computing the distances,
            e.g., `Data.x[:, [0, 1, 2]]`.

    Returns:
        The same batch of events, but with updated edges.
    """
    data_list = batch.to_data_list()
    for i in range(len(data_list)):
        data_list[i].edge_index = knn_graph(
            x=data_list[i].x[:, columns], k=k[i]
        )
    return Batch.from_data_list(data_list)
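For instance — a minimal sketch using toy graphs: build two events, then recompute their edges with different k, using only the first three feature columns for the distance.

import torch
from torch_geometric.data import Batch, Data

# Two toy events with 5 and 8 nodes and 4 features (x, y, z, t) each.
events = [Data(x=torch.randn(5, 4)), Data(x=torch.randn(8, 4))]
batch = Batch.from_data_list(events)

# k=3 neighbours for the first event, k=4 for the second.
batch = knn_graph_batch(batch, k=[3, 4], columns=[0, 1, 2])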

Source code for graphnet.training.utils

-"""Utility functions for `graphnet.training`."""
-
-from collections import OrderedDict
-import os
-from typing import Dict, List, Optional, Tuple, Union, Callable
-
-import numpy as np
-import pandas as pd
-from pytorch_lightning import Trainer
-from sklearn.model_selection import train_test_split
-from torch.utils.data import DataLoader
-from torch_geometric.data import Batch, Data
-
-from graphnet.data.dataset import Dataset
-from graphnet.data.dataset import SQLiteDataset
-from graphnet.data.dataset import ParquetDataset
-from graphnet.models import Model
-from graphnet.utilities.logging import Logger
-from graphnet.models.graphs import GraphDefinition
-
-
-
def collate_fn(graphs: List[Data]) -> Batch:
    """Remove graphs with less than two DOM hits.

    Should not occur in "production".
    """
    graphs = [g for g in graphs if g.n_pulses > 1]
    return Batch.from_data_list(graphs)


class collator_sequence_buckleting:
    """Perform sequence bucketing for the graphs in the batch."""

    def __init__(self, batch_splits: List[float] = [0.8]):
        """Set the cutting points of the different mini-batches.

        Args:
            batch_splits: List of floats, each element being a fraction of
                the total number of graphs. The list should not explicitly
                contain the first and last cut points, which are always 0
                and 1, respectively.
        """
        self.batch_splits = batch_splits

    def __call__(self, graphs: List[Data]) -> List[Batch]:
        """Execute sequence bucketing on the input list of graphs.

        Args:
            graphs: A list of Data objects representing the input graphs.

        Returns:
            A list of Batch objects, each containing a mini-batch of the
            input graphs sorted by their number of pulses.
        """
        graphs = [g for g in graphs if g.n_pulses > 1]
        graphs.sort(key=lambda x: x.n_pulses)
        batch_list = []

        for minp, maxp in zip(
            [0] + self.batch_splits, self.batch_splits + [1]
        ):
            min_idx = int(minp * len(graphs))
            max_idx = int(maxp * len(graphs))
            this_graphs = graphs[min_idx:max_idx]
            if len(this_graphs) > 0:
                this_batch = Batch.from_data_list(this_graphs)
                batch_list.append(this_batch)
        return batch_list
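To see the bucketing in action, a standalone sketch with toy events: with the default `batch_splits=[0.8]`, the shortest 80% of events form one mini-batch and the longest 20% another, which keeps the memory overhead of long events contained.

import torch
from torch_geometric.data import Data

# Toy events with 2..11 pulses each.
graphs = [
    Data(x=torch.randn(n, 4), n_pulses=torch.tensor(n))
    for n in range(2, 12)
]

collate = collator_sequence_buckleting(batch_splits=[0.8])
batches = collate(graphs)
# len(batches) == 2: one Batch with the 8 shortest events,
# one Batch with the 2 longest.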
# @TODO: Remove in favour of DataLoader{,.from_dataset_config}
def make_dataloader(
    db: str,
    pulsemaps: Union[str, List[str]],
    graph_definition: GraphDefinition,
    features: List[str],
    truth: List[str],
    *,
    batch_size: int,
    shuffle: bool,
    selection: Optional[List[int]] = None,
    num_workers: int = 10,
    persistent_workers: bool = True,
    node_truth: Optional[List[str]] = None,
    truth_table: str = "truth",
    node_truth_table: Optional[str] = None,
    string_selection: Optional[List[int]] = None,
    loss_weight_table: Optional[str] = None,
    loss_weight_column: Optional[str] = None,
    index_column: str = "event_no",
    labels: Optional[Dict[str, Callable]] = None,
) -> DataLoader:
    """Construct `DataLoader` instance."""
    # Check(s)
    if isinstance(pulsemaps, str):
        pulsemaps = [pulsemaps]

    dataset = SQLiteDataset(
        path=db,
        pulsemaps=pulsemaps,
        features=features,
        truth=truth,
        selection=selection,
        node_truth=node_truth,
        truth_table=truth_table,
        node_truth_table=node_truth_table,
        string_selection=string_selection,
        loss_weight_table=loss_weight_table,
        loss_weight_column=loss_weight_column,
        index_column=index_column,
        graph_definition=graph_definition,
    )

    # Add custom labels to the dataset
    if isinstance(labels, dict):
        for label in labels.keys():
            dataset.add_label(key=label, fn=labels[label])

    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        collate_fn=collate_fn,
        persistent_workers=persistent_workers,
        prefetch_factor=2,
    )

    return dataloader
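A usage sketch (the database path, pulse map, and column names are hypothetical, and the `KNNGraph`/`IceCube86` imports are assumptions based on graphnet's standard graph definitions and detector classes):

from graphnet.models.detector.icecube import IceCube86
from graphnet.models.graphs import KNNGraph

dataloader = make_dataloader(
    db="/data/my_events.db",                       # hypothetical SQLite file
    pulsemaps="SRTTWOfflinePulsesDC",              # hypothetical pulse map
    graph_definition=KNNGraph(detector=IceCube86()),
    features=["dom_x", "dom_y", "dom_z", "dom_time", "charge"],
    truth=["energy", "zenith", "azimuth"],
    batch_size=128,
    shuffle=True,
)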
# @TODO: Remove in favour of DataLoader{,.from_dataset_config}
def make_train_validation_dataloader(
    db: str,
    graph_definition: GraphDefinition,
    selection: Optional[List[int]],
    pulsemaps: Union[str, List[str]],
    features: List[str],
    truth: List[str],
    *,
    batch_size: int,
    database_indices: Optional[List[int]] = None,
    seed: int = 42,
    test_size: float = 0.33,
    num_workers: int = 10,
    persistent_workers: bool = True,
    node_truth: Optional[str] = None,
    truth_table: str = "truth",
    node_truth_table: Optional[str] = None,
    string_selection: Optional[List[int]] = None,
    loss_weight_column: Optional[str] = None,
    loss_weight_table: Optional[str] = None,
    index_column: str = "event_no",
    labels: Optional[Dict[str, Callable]] = None,
) -> Tuple[DataLoader, DataLoader]:
    """Construct train and test `DataLoader` instances."""
    # Reproducibility
    rng = np.random.default_rng(seed=seed)

    # Check(s)
    if isinstance(pulsemaps, str):
        pulsemaps = [pulsemaps]

    if selection is None:
        # If no selection is provided, use all events in the dataset.
        dataset: Dataset
        if db.endswith(".db"):
            dataset = SQLiteDataset(
                path=db,
                graph_definition=graph_definition,
                pulsemaps=pulsemaps,
                features=features,
                truth=truth,
                truth_table=truth_table,
                index_column=index_column,
            )
        elif db.endswith(".parquet"):
            dataset = ParquetDataset(
                path=db,
                graph_definition=graph_definition,
                pulsemaps=pulsemaps,
                features=features,
                truth=truth,
                truth_table=truth_table,
                index_column=index_column,
            )
        else:
            raise RuntimeError(
                f"File {db} with format {db.split('.')[-1]} not supported."
            )
        selection = dataset._get_all_indices()

    # Perform the train/validation split
    if isinstance(db, list):
        df_for_shuffle = pd.DataFrame(
            {"event_no": selection, "db": database_indices}
        )
        shuffled_df = df_for_shuffle.sample(
            frac=1, replace=False, random_state=rng
        )
        training_df, validation_df = train_test_split(
            shuffled_df, test_size=test_size, random_state=seed
        )
        training_selection = training_df.values.tolist()
        validation_selection = validation_df.values.tolist()
    else:
        training_selection, validation_selection = train_test_split(
            selection, test_size=test_size, random_state=seed
        )

    # Create DataLoaders
    common_kwargs = dict(
        db=db,
        pulsemaps=pulsemaps,
        features=features,
        truth=truth,
        batch_size=batch_size,
        num_workers=num_workers,
        persistent_workers=persistent_workers,
        node_truth=node_truth,
        truth_table=truth_table,
        node_truth_table=node_truth_table,
        string_selection=string_selection,
        loss_weight_column=loss_weight_column,
        loss_weight_table=loss_weight_table,
        index_column=index_column,
        labels=labels,
        graph_definition=graph_definition,
    )

    training_dataloader = make_dataloader(
        shuffle=True,
        selection=training_selection,
        **common_kwargs,  # type: ignore[arg-type]
    )

    validation_dataloader = make_dataloader(
        shuffle=False,
        selection=validation_selection,
        **common_kwargs,  # type: ignore[arg-type]
    )

    return (
        training_dataloader,
        validation_dataloader,
    )
# @TODO: Remove in favour of Model.predict{,_as_dataframe}
def get_predictions(
    trainer: Trainer,
    model: Model,
    dataloader: DataLoader,
    prediction_columns: List[str],
    *,
    node_level: bool = False,
    additional_attributes: Optional[List[str]] = None,
) -> pd.DataFrame:
    """Get `model` predictions on `dataloader`.

    NOTE: The dataloader must NOT have shuffle=True, since the additional
    attributes are read out in a second pass over the same dataloader and
    would otherwise be misaligned with the predictions.
    """
    # Check(s)
    if additional_attributes is None:
        additional_attributes = []
    assert isinstance(additional_attributes, list)

    # Set model to inference mode
    model.inference()

    # Get predictions
    predictions_torch = trainer.predict(model, dataloader)
    predictions_list = [
        p[0].detach().cpu().numpy() for p in predictions_torch
    ]  # Assuming a single task
    predictions = np.concatenate(predictions_list, axis=0)
    try:
        assert len(prediction_columns) == predictions.shape[1]
    except IndexError:
        predictions = predictions.reshape((-1, 1))
        assert len(prediction_columns) == predictions.shape[1]

    # Get additional attributes
    attributes: Dict[str, List[np.ndarray]] = OrderedDict(
        [(attr, []) for attr in additional_attributes]
    )
    for batch in dataloader:
        for attr in attributes:
            attribute = batch[attr].detach().cpu().numpy()
            if node_level:
                if attr == "event_no":
                    # Repeat the event number once per pulse in the event.
                    attribute = np.repeat(
                        attribute, batch["n_pulses"].detach().cpu().numpy()
                    )
            attributes[attr].extend(attribute)

    data = np.concatenate(
        [predictions]
        + [
            np.asarray(values)[:, np.newaxis]
            for values in attributes.values()
        ],
        axis=1,
    )

    results = pd.DataFrame(
        data, columns=prediction_columns + additional_attributes
    )
    return results
# @TODO: Remove
def save_results(
    db: str, tag: str, results: pd.DataFrame, archive: str, model: Model
) -> None:
    """Save trained model and prediction `results` in `db`."""
    db_name = db.split("/")[-1].split(".")[0]
    path = archive + "/" + db_name + "/" + tag
    os.makedirs(path, exist_ok=True)
    results.to_csv(path + "/results.csv")
    model.save_state_dict(path + "/" + tag + "_state_dict.pth")
    model.save(path + "/" + tag + "_model.pth")
    Logger().info("Results saved at: \n %s" % path)

All modules for which code is available

@@ -571,36 +465,7 @@
@@ -610,202 +475,8 @@

dataset

-

Base Dataset class(es) used in GraphNeT.

exception graphnet.data.dataset.dataset.ColumnMissingException

    Bases: Exception

    Exception to indicate a missing column in a dataset.

graphnet.data.dataset.dataset.load_module(class_name)

    Load graphnet module from string name.

    Parameters:
        class_name (str) – name of the class.

    Return type:
        Type

    Returns:
        graphnet module.

graphnet.data.dataset.dataset.parse_graph_definition(cfg)

    Construct GraphDefinition from DatasetConfig.

    Parameters:
        cfg (dict) –

    Return type:
        GraphDefinition

class graphnet.data.dataset.dataset.Dataset(*args, **kwargs)

    Bases: Logger, Configurable, Dataset, ABC

    Base Dataset class for reading from any intermediate file format.

    Construct Dataset.

    Parameters:
      • path (Union[str, List[str]]) – Path to the file(s) from which this
        Dataset should read.
      • pulsemaps (Union[str, List[str]]) – Name(s) of the pulse map series
        that should be used to construct the nodes on the individual graph
        objects, and their features. Multiple pulse series maps can be used,
        e.g., when different DOM types are stored in different maps.
      • features (List[str]) – List of columns in the input files that
        should be used as node features on the graph objects.
      • truth (List[str]) – List of event-level columns in the input files
        that should be added as attributes on the graph objects.
      • node_truth (Optional[List[str]], default: None) – List of
        node-level columns in the input files that should be added as
        attributes on the graph objects.
      • index_column (str, default: 'event_no') – Name of the column in the
        input files that contains unique indices to identify and map events
        across tables.
      • truth_table (str, default: 'truth') – Name of the table containing
        event-level truth information.
      • node_truth_table (Optional[str], default: None) – Name of the table
        containing node-level truth information.
      • string_selection (Optional[List[int]], default: None) – Subset of
        strings for which data should be read and used to construct graph
        objects. Defaults to None, meaning all strings for which data
        exists are used.
      • selection (Union[str, List[int], List[List[int]], None], default:
        None) – The events that should be read. This can be given either as
        a list of indices (in index_column), or as a string-based selection
        used to query the Dataset for events passing the selection.
        Defaults to None, meaning that all events in the input files are
        read.
      • dtype (dtype, default: torch.float32) – Type of the feature tensor
        on the graph objects returned.
      • loss_weight_table (Optional[str], default: None) – Name of the
        table containing per-event loss weights.
      • loss_weight_column (Optional[str], default: None) – Name of the
        column in loss_weight_table containing per-event loss weights. This
        is also the name of the corresponding attribute assigned to the
        graph object.
      • loss_weight_default_value (Optional[float], default: None) –
        Default per-event loss weight. NOTE: This default value is only
        applied when loss_weight_table and loss_weight_column are
        specified, and in this case to events with no value in the
        corresponding table/column. That is, if no per-event loss weight
        table/column is provided, this value is ignored. Defaults to None.
      • seed (Optional[int], default: None) – Random number generator seed,
        used for selecting a random subset of events when resolving a
        string-based selection (e.g., "10000 random events ~ event_no % 5
        > 0" or "20% random events ~ event_no % 5 > 0").
      • graph_definition (GraphDefinition) – Method that defines the graph
        representation.
      • args (Any) –
      • kwargs (Any) –

    Return type:
        object

    classmethod from_config(source)

        Construct Dataset instance from source configuration.

        Return type:
            Union[Dataset, EnsembleDataset, Dict[str, Dataset],
            Dict[str, EnsembleDataset]]

        Parameters:
            source (DatasetConfig | str) –

    classmethod concatenate(datasets)

        Concatenate multiple `Dataset`s into one instance.

        Return type:
            EnsembleDataset

        Parameters:
            datasets (List[Dataset]) –

    property path: str | List[str]

        Path to the file(s) from which this Dataset reads.

    property truth_table: str

        Name of the table containing event-level truth information.

    abstract query_table(table, columns, sequential_index, selection)

        Query a table at a specific index, optionally with some selection.

        Parameters:
          • table (str) – Table to be queried.
          • columns (Union[List[str], str]) – Columns to read out.
          • sequential_index (Optional[int], default: None) – Sequentially
            numbered index (i.e. in [0, len(self))) of the event to query.
            This _may_ differ from the indexation used in self._indices. If
            no value is provided, the entire column is returned.
          • selection (Optional[str], default: None) – Selection to be
            imposed before reading out data. Defaults to None.

        Return type:
            List[Tuple[Any, ...]]

        Returns:
            List of tuples containing the values in columns. If the table
            contains only scalar data for columns, a list of length 1 is
            returned.

        Raises:
            ColumnMissingException – If one or more elements in columns is
            not present in table.

    add_label(fn, key)

        Add custom graph label defined using function fn.

        Return type:
            None

        Parameters:
          • fn (Callable[[Data], Any]) –
          • key (str | None) –

class graphnet.data.dataset.dataset.EnsembleDataset(datasets)

    Bases: ConcatDataset

    Construct a single dataset from a collection of datasets.

    Parameters:
        datasets (Iterable[Dataset]) – A collection of Datasets.
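A short usage sketch of the interface above (not from the docs; the configuration path and the label logic are hypothetical):

from graphnet.data.dataset import Dataset

# Build one or more Dataset instances from a saved DatasetConfig.
dataset = Dataset.from_config("/configs/my_dataset.yml")

# Attach a custom event-level label, computed from the graph object.
dataset.add_label(key="is_high_energy", fn=lambda data: data["energy"] > 1e3)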
dataset

Dataset classes for training in GraphNeT.

Subpackages
parquet

Datasets using parquet backend.

Submodules
parquet_dataset

Dataset class(es) for reading from Parquet files.

class graphnet.data.dataset.parquet.parquet_dataset.ParquetDataset(*args, **kwargs)

    Bases: Dataset

    Pytorch dataset for reading from Parquet files.

    Construct Dataset. The constructor parameters are identical to those of
    graphnet.data.dataset.dataset.Dataset, documented above.

    query_table(table, columns, sequential_index, selection)

        Query table at a specific index, optionally with some selection.

        Return type:
            List[Tuple[Any, ...]]

        Parameters:
          • table (str) –
          • columns (List[str] | str) –
          • sequential_index (int | None) –
          • selection (str | None) –
sqlite

Datasets using SQLite backend.

Submodules
sqlite_dataset

Dataset class(es) for reading data from SQLite databases.

class graphnet.data.dataset.sqlite.sqlite_dataset.SQLiteDataset(*args, **kwargs)

    Bases: Dataset

    Pytorch dataset for reading data from SQLite databases.

    Construct Dataset. The constructor parameters are identical to those of
    graphnet.data.dataset.dataset.Dataset, documented above.

    query_table(table, columns, sequential_index, selection)

        Query table at a specific index, optionally with some selection.

        Return type:
            List[Tuple[Any, ...]]

        Parameters:
          • table (str) –
          • columns (List[str] | str) –
          • sequential_index (int | None) –
          • selection (str | None) –
pipeline

Class(es) used for analysis in PISA.

class graphnet.data.pipeline.InSQLitePipeline(module_dict, features, truth, device, retro_table_name, outdir, batch_size, n_workers, pipeline_name)

    Bases: ABC, Logger

    Create a SQLite database for PISA analysis.

    The database will contain truth and GNN predictions and, if available,
    RETRO reconstructions.

    Initialise the pipeline.

    Parameters:
      • module_dict (Dict) – A dictionary with GNN modules from GraphNeT,
        e.g. {'energy': gnn_module_for_energy_regression}.
      • features (List[str]) – List of input features for the GNN modules.
      • truth (List[str]) – List of truth for the GNN ModuleList.
      • device (device) – The device used for computation.
      • retro_table_name (str, default: 'retro') – Name of the retro table.
      • outdir (Optional[str], default: None) – The directory in which the
        pipeline database will be stored.
      • batch_size (int, default: 100) – Batch size for inference.
      • n_workers (int, default: 10) – Number of workers used in
        dataloading.
      • pipeline_name (str, default: 'pipeline') – Name of the pipeline. If
        such a pipeline already exists, an error will be raised to avoid
        overwriting.
    Parameters:
    @@ -427,18 +395,7 @@ @@ -448,94 +405,8 @@
graphnet_module

Class(es) for deploying GraphNeT models in icetray as I3Modules.

class graphnet.deployment.i3modules.graphnet_module.GraphNeTI3Module(graph_definition, pulsemap, features, pulsemap_extractor, gcd_file)

    Bases: object

    Base I3 Module for GraphNeT.

    Contains methods for extracting pulsemaps, producing graphs and writing
    to frames.

    I3Module constructor.

    Parameters:
      • graph_definition (GraphDefinition) – An instance of
        GraphDefinition, e.g. KNNGraph.
      • pulsemap (str) – The pulse map on which the module operates.
      • features (List[str]) – The features that are used from the pulse
        map, e.g. [dom_x, dom_y, dom_z, charge].
      • pulsemap_extractor (Union[List[I3FeatureExtractor],
        I3FeatureExtractor]) – The I3FeatureExtractor used to extract the
        pulsemap from the I3Frames.
      • gcd_file (str) – Path to the associated gcd file.

class graphnet.deployment.i3modules.graphnet_module.I3InferenceModule(pulsemap, features, pulsemap_extractor, model_config, state_dict, model_name, gcd_file, prediction_columns)

    Bases: GraphNeTI3Module

    General class for inference on I3Frames (physics).

    Parameters:
      • pulsemap (str) – The pulsemap that the model expects as input.
      • features (List[str]) – The features of the pulsemap that the model
        expects.
      • pulsemap_extractor (Union[List[I3FeatureExtractor],
        I3FeatureExtractor]) – The extractor used to extract the pulsemap.
      • model_config (Union[ModelConfig, str]) – The ModelConfig (or path
        to it) that summarises the model used for inference.
      • state_dict (str) – Path to the state_dict containing the learned
        weights.
      • model_name (str) – The name used for the model. Will help define
        the named entry in the I3Frame, e.g. "dynedge".
      • gcd_file (str) – Path to the associated gcd file.
      • prediction_columns (Union[str, List[str], None], default: None) –
        Column names for the predictions of the model. Will help define the
        named entry in the I3Frame, e.g. ['energy_reco']. Optional.

class graphnet.deployment.i3modules.graphnet_module.I3PulseCleanerModule(pulsemap, features, pulsemap_extractor, model_config, state_dict, model_name, *, gcd_file, threshold, discard_empty_events, prediction_columns)

    Bases: I3InferenceModule

    A specialized module for pulse cleaning.

    It is assumed that the model provided has been trained for this.

    Parameters:
      • pulsemap (str) – The pulsemap that the model expects as input (the
        one that is being cleaned).
      • features (List[str]) – The features of the pulsemap that the model
        expects.
      • pulsemap_extractor (Union[List[I3FeatureExtractor],
        I3FeatureExtractor]) – The extractor used to extract the pulsemap.
      • model_config (str) – The ModelConfig (or path to it) that
        summarises the model used for inference.
      • state_dict (str) – Path to the state_dict containing the learned
        weights.
      • model_name (str) – The name used for the model. Will help define
        the named entry in the I3Frame, e.g. "dynedge".
      • gcd_file (str) – Path to the associated gcd file.
      • threshold (float, default: 0.7) – The threshold for being
        considered a positive case. E.g., predictions >= threshold will be
        considered signal, all else noise.
      • discard_empty_events (bool, default: False) – When true, this flag
        will eliminate events whose cleaned pulse series are empty. Can be
        used to speed up processing, especially for noise simulation, since
        it will not do any writing or further calculations.
      • prediction_columns (Union[str, List[str], None], default: None) –
        Column names for the predictions of the model. Will help define the
        named entry in the I3Frame, e.g. ['energy_reco']. Optional.
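A construction sketch based on the parameter list above (all file paths and names are hypothetical, and the extractor import path is an assumption about graphnet's extractor module layout):

from graphnet.deployment.i3modules.graphnet_module import I3InferenceModule
from graphnet.data.extractors.i3featureextractor import (
    I3FeatureExtractorIceCube86,
)

module = I3InferenceModule(
    pulsemap="SplitInIcePulses",  # hypothetical pulse map name
    features=["dom_x", "dom_y", "dom_z", "dom_time", "charge"],
    pulsemap_extractor=I3FeatureExtractorIceCube86("SplitInIcePulses"),
    model_config="/models/dynedge_energy/config.yml",
    state_dict="/models/dynedge_energy/state_dict.pth",
    model_name="dynedge",
    gcd_file="/data/gcd/GeoCalibDetectorStatus.i3.gz",
)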

    i3modules

    diff --git a/api/graphnet.models.coarsening.html b/api/graphnet.models.coarsening.html index 602eed114..369288274 100644 --- a/api/graphnet.models.coarsening.html +++ b/api/graphnet.models.coarsening.html @@ -125,7 +125,6 @@ - @@ -366,90 +365,11 @@ - -
    -
    -

    coarsening

    -

    Class(es) for coarsening operations (i.e., clustering, or local pooling).

    -
    -
graphnet.models.coarsening.unbatch_edge_index(edge_index, batch)[source]

    Splits the edge_index according to a batch vector.

    Parameters:
    • edge_index (Tensor) – The edge_index tensor. Must be ordered.
    • batch (LongTensor) – The batch vector \(\mathbf{b} \in \{0, \ldots, B-1\}^N\), which assigns each node to a specific example. Must be ordered.

    Return type: List[Tensor]
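
A minimal usage sketch, assuming only the documented semantics (ordered inputs, one edge_index per example returned):

    import torch
    from graphnet.models.coarsening import unbatch_edge_index

    # Two graphs: nodes 0-2 belong to example 0, nodes 3-4 to example 1.
    edge_index = torch.tensor([[0, 1, 3], [1, 2, 4]])
    batch = torch.tensor([0, 0, 0, 1, 1])

    per_graph = unbatch_edge_index(edge_index, batch)
    # per_graph is a list with one edge_index tensor per example.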
class graphnet.models.coarsening.Coarsening(*args, **kwargs)[source]

    Bases: Model

    Base class for coarsening operations.

    Construct Coarsening.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    reduce_options = {'avg': (<function avg_pool>, <function avg_pool_x>), 'max': (<function max_pool>, <function max_pool_x>), 'min': (<function min_pool>, <function min_pool_x>), 'sum': (<function sum_pool>, <function sum_pool_x>)}

    forward(data)[source]

        Perform coarsening operation.

        Parameters: data (Data | Batch) –

        Return type: Union[Data, Batch]

class graphnet.models.coarsening.AttributeCoarsening(*args, **kwargs)[source]

    Bases: Coarsening

    Coarsen pulses based on specified attributes.

    Construct SimpleCoarsening.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

class graphnet.models.coarsening.DOMCoarsening(*args, **kwargs)[source]

    Bases: Coarsening

    Coarsen pulses to DOM-level.

    Cluster pulses on the same DOM.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

class graphnet.models.coarsening.CustomDOMCoarsening(*args, **kwargs)[source]

    Bases: DOMCoarsening

    Coarsen pulses to DOM-level with additional attributes.

    Cluster pulses on the same DOM.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

class graphnet.models.coarsening.DOMAndTimeWindowCoarsening(*args, **kwargs)[source]

    Bases: Coarsening

    Coarsen pulses to DOM-level, with additional time-window clustering.

    Cluster pulses on the same DOM within time_window.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object
diff --git a/api/graphnet.models.components.html b/api/graphnet.models.components.html
index 073301a16..e72fdd239 100644
--- a/api/graphnet.models.components.html
+++ b/api/graphnet.models.components.html

components

Components for constructing models.

Submodules

diff --git a/api/graphnet.models.components.layers.html b/api/graphnet.models.components.layers.html
index f7cebd134..2e3ad6d0b 100644
--- a/api/graphnet.models.components.layers.html
+++ b/api/graphnet.models.components.layers.html
layers

Class(es) implementing layers to be used in graphnet models.

class graphnet.models.components.layers.DynEdgeConv(nn, aggr, nb_neighbors, features_subset, **kwargs)[source]

    Bases: EdgeConv, LightningModule

    Dynamical edge convolution layer.

    Construct DynEdgeConv.

    Parameters:
    • nn (Callable) – The MLP/torch.Module to be used within the EdgeConv.
    • aggr (str, default: 'max') – Aggregation method to be used with EdgeConv.
    • nb_neighbors (int, default: 8) – Number of neighbours to be clustered after the EdgeConv operation.
    • features_subset (Union[Sequence[int], slice, None], default: None) – Subset of features in Data.x that should be used when dynamically performing the new graph clustering after the EdgeConv operation. Defaults to all features.
    • **kwargs (Any) – Additional features to be passed to EdgeConv.

    forward(x, edge_index, batch)[source]

        Forward pass.

        Parameters:
        • x (Tensor) –
        • edge_index (Tensor | SparseTensor) –
        • batch (Tensor | None) –

        Return type: Tensor

class graphnet.models.components.layers.EdgeConvTito(nn, aggr, **kwargs)[source]

    Bases: MessagePassing, LightningModule

    Implementation of the EdgeConvTito layer used in the TITO solution for the 'IceCube - Neutrinos in Deep Ice' Kaggle competition.

    Construct EdgeConvTito.

    Parameters:
    • nn (Callable) – The MLP/torch.Module to be used within the EdgeConvTito.
    • aggr (str, default: 'max') – Aggregation method to be used with EdgeConvTito.
    • **kwargs (Any) – Additional features to be passed to EdgeConvTito.

    reset_parameters()[source]

        Reset all learnable parameters of the module.

        Return type: None

    forward(x, edge_index)[source]

        Forward pass.

        Parameters:
        • x (Tensor | Tuple[Tensor, Tensor]) –
        • edge_index (Tensor | SparseTensor) –

        Return type: Tensor

    message(x_i, x_j)[source]

        EdgeConvTito message passing.

        Parameters:
        • x_i (Tensor) –
        • x_j (Tensor) –

        Return type: Tensor

class graphnet.models.components.layers.DynTrans(layer_sizes, aggr, features_subset, n_head, **kwargs)[source]

    Bases: EdgeConvTito, LightningModule

    Implementation of the dynTrans1 layer used in the TITO solution for the 'IceCube - Neutrinos in Deep Ice' Kaggle competition.

    Construct DynTrans.

    Parameters:
    • layer_sizes (Optional[List[int]], default: None) – List of layer sizes to be used in DynTrans.
    • aggr (str, default: 'max') – Aggregation method to be used with DynTrans.
    • features_subset (Union[Sequence[int], slice, None], default: None) – Subset of features in Data.x that should be used when dynamically performing the new graph clustering after the EdgeConv operation. Defaults to all features.
    • n_head (int, default: 8) – Number of heads to be used in the multihead attention models.
    • **kwargs (Any) – Additional features to be passed to DynTrans.

    forward(x, edge_index, batch)[source]

        Forward pass.

        Parameters:
        • x (Tensor) –
        • edge_index (Tensor | SparseTensor) –
        • batch (Tensor | None) –

        Return type: Tensor
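
A minimal sketch of a single DynEdgeConv layer. Feature sizes are illustrative; the MLP input is 2 * in_features because EdgeConv operates on concatenated [x_i, x_j - x_i] pairs:

    import torch
    from torch import nn
    from graphnet.models.components.layers import DynEdgeConv

    in_features, hidden = 4, 64
    mlp = nn.Sequential(
        nn.Linear(2 * in_features, hidden),
        nn.LeakyReLU(),
        nn.Linear(hidden, hidden),
    )
    conv = DynEdgeConv(mlp, aggr="max", nb_neighbors=8, features_subset=slice(0, 3))

    x = torch.randn(10, in_features)            # 10 pulses, 4 features each
    edge_index = torch.randint(0, 10, (2, 20))  # initial connectivity
    batch = torch.zeros(10, dtype=torch.long)   # all pulses in one event
    out = conv(x, edge_index, batch)            # output of the layer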
diff --git a/api/graphnet.models.components.pool.html b/api/graphnet.models.components.pool.html
index 00ca61435..5cd8d2b76 100644
--- a/api/graphnet.models.components.pool.html
+++ b/api/graphnet.models.components.pool.html
pool

Functions for performing pooling/clustering/coarsening.

graphnet.models.components.pool.min_pool(cluster, data, transform)[source]

    Perform min-pooling of Data.

    Like max_pool, just negating data.x.

    Parameters:
    • cluster (LongTensor) –
    • data (Data) –
    • transform (Any | None) –

    Return type: Data

graphnet.models.components.pool.min_pool_x(cluster, x, batch, size)[source]

    Perform min-pooling of Tensor.

    Like max_pool_x, just negating x.

    Parameters:
    • cluster (LongTensor) –
    • x (Tensor) –
    • batch (LongTensor) –
    • size (int | None) –

    Return type: Tensor

graphnet.models.components.pool.sum_pool_and_distribute(tensor, cluster_index, batch)[source]

    Sum-pool values and distribute the result to the individual nodes.

    Parameters:
    • tensor (Tensor) –
    • cluster_index (LongTensor) –
    • batch (LongTensor | None) –

    Return type: Tensor

graphnet.models.components.pool.group_by(data, keys)[source]

    Group nodes in data that have identical values of keys.

    This grouping is done within each event in case of batching. This allows for, e.g., assigning the same index to all pulses on the same PMT or DOM in the same event. This can be used for coarsening graphs, e.g., from pulse-level to DOM-level, by aggregating features across each group returned by this method.

    Parameters:
    • data (Data | Batch) –
    • keys (List[str]) –

    Return type: LongTensor

    Example

    Given:
        data.f1 = [1,1,2,2,2]
        data.f2 = [6,7,7,7,8]

    Calls:
        group_by(data, ['f1']) -> [0, 0, 1, 1, 1]
        group_by(data, ['f2']) -> [0, 1, 1, 1, 2]
        group_by(data, ['f1', 'f2']) -> [0, 1, 2, 2, 3]
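
The example above can be reproduced on a toy Data object; the exact handling of the batch attribute for a plain (unbatched) Data is an assumption here:

    import torch
    from torch_geometric.data import Data
    from graphnet.models.components.pool import group_by

    data = Data(x=torch.zeros(5, 1))
    data.f1 = torch.tensor([1, 1, 2, 2, 2])
    data.f2 = torch.tensor([6, 7, 7, 7, 8])
    data.batch = torch.zeros(5, dtype=torch.long)  # single event

    print(group_by(data, ["f1"]))        # expected: tensor([0, 0, 1, 1, 1])
    print(group_by(data, ["f1", "f2"]))  # expected: tensor([0, 1, 2, 2, 3])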
graphnet.models.components.pool.group_pulses_to_dom(data)[source]

    Group pulses on the same DOM, using DOM and string number.

    Parameters: data (Data) –

    Return type: Data

graphnet.models.components.pool.group_pulses_to_pmt(data)[source]

    Group pulses on the same PMT, using PMT, DOM, and string number.

    Parameters: data (Data) –

    Return type: Data

graphnet.models.components.pool.sum_pool_x(cluster, x, batch, size)[source]

    Sum-pool node features according to the clustering defined in cluster.

    Parameters:
    • cluster (LongTensor) – Cluster vector \(\mathbf{c} \in \{0, \ldots, N-1\}^N\), which assigns each node to a specific cluster.
    • x (Tensor) – Node feature matrix \(\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}\).
    • batch (LongTensor) – Batch vector \(\mathbf{b} \in \{0, \ldots, B-1\}^N\), which assigns each node to a specific example.
    • size (Optional[int], default: None) – The maximum number of clusters in a single example. This property is useful to obtain a batch-wise dense representation, e.g. for applying FC layers, but should only be used if the maximum number of clusters per example is known in advance.

    Return type: Tensor
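
A small sketch of sum-pooling per-cluster node features, assuming only the signature documented above:

    import torch
    from graphnet.models.components.pool import sum_pool_x

    cluster = torch.tensor([0, 0, 1, 1])      # two clusters of two nodes each
    x = torch.ones(4, 3)                      # four nodes, three features
    batch = torch.zeros(4, dtype=torch.long)  # one example

    pooled = sum_pool_x(cluster, x, batch)    # one row per cluster, features summed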
graphnet.models.components.pool.std_pool_x(cluster, x, batch, size)[source]

    Std-pool node features according to the clustering defined in cluster.

    Parameters:
    • cluster (LongTensor) – Cluster vector \(\mathbf{c} \in \{0, \ldots, N-1\}^N\), which assigns each node to a specific cluster.
    • x (Tensor) – Node feature matrix \(\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}\).
    • batch (LongTensor) – Batch vector \(\mathbf{b} \in \{0, \ldots, B-1\}^N\), which assigns each node to a specific example.
    • size (Optional[int], default: None) – The maximum number of clusters in a single example. This property is useful to obtain a batch-wise dense representation, e.g. for applying FC layers, but should only be used if the maximum number of clusters per example is known in advance.

    Return type: Tensor

graphnet.models.components.pool.sum_pool(cluster, data, transform)[source]

    Pool and coarsen graph according to the clustering defined in cluster.

    All nodes within the same cluster will be represented as one node. Final node features are defined by the sum of the features of all nodes within the same cluster, node positions are averaged, and edge indices are defined to be the union of the edge indices of all nodes within the same cluster.

    Parameters:
    • cluster (LongTensor) – Cluster vector \(\mathbf{c} \in \{0, \ldots, N-1\}^N\), which assigns each node to a specific cluster.
    • data (Data) – Graph data object.
    • transform (Optional[Callable], default: None) – A function/transform that takes in the coarsened and pooled torch_geometric.data.Data object and returns a transformed version.

    Return type: Data

graphnet.models.components.pool.std_pool(cluster, data, transform)[source]

    Pool and coarsen graph according to the clustering defined in cluster.

    All nodes within the same cluster will be represented as one node. Final node features are defined by the std of the features of all nodes within the same cluster, node positions are averaged, and edge indices are defined to be the union of the edge indices of all nodes within the same cluster.

    Parameters:
    • cluster (LongTensor) – Cluster vector \(\mathbf{c} \in \{0, \ldots, N-1\}^N\), which assigns each node to a specific cluster.
    • data (Data) – Graph data object.
    • transform (Optional[Callable], default: None) – A function/transform that takes in the coarsened and pooled torch_geometric.data.Data object and returns a transformed version.

    Return type: Data
diff --git a/api/graphnet.models.gnn.convnet.html b/api/graphnet.models.gnn.convnet.html
index 6b11359f2..333d51552 100644
--- a/api/graphnet.models.gnn.convnet.html
+++ b/api/graphnet.models.gnn.convnet.html
convnet

Implementation of the ConvNet GNN model architecture.

Author: Martin Ha Minh

class graphnet.models.gnn.convnet.ConvNet(*args, **kwargs)[source]

    Bases: GNN

    ConvNet (convolutional network) model.

    Construct ConvNet.

    Parameters:
    • nb_inputs (int) – Number of input features, i.e. dimension of the input layer.
    • nb_outputs (int) – Number of prediction labels, i.e. dimension of the output layer.
    • nb_intermediate (int, default: 128) – Number of nodes in intermediate layer(s).
    • dropout_ratio (float, default: 0.3) – Fraction of nodes to drop.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    forward(data)[source]

        Apply learnable forward pass.

        Parameters: data (Data) –

        Return type: Tensor
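
A minimal construction sketch; feature and label counts are illustrative, and the re-export from graphnet.models.gnn is an assumption:

    from graphnet.models.gnn import ConvNet  # else: from graphnet.models.gnn.convnet import ConvNet

    backbone = ConvNet(
        nb_inputs=4,         # e.g. x, y, z, time per node
        nb_outputs=2,        # latent dimension consumed by a downstream Task
        nb_intermediate=128,
        dropout_ratio=0.3,
    )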
diff --git a/api/graphnet.models.gnn.dynedge.html b/api/graphnet.models.gnn.dynedge.html
index ec76a893b..8b6a25eb0 100644
--- a/api/graphnet.models.gnn.dynedge.html
+++ b/api/graphnet.models.gnn.dynedge.html
dynedge

Implementation of the DynEdge GNN model architecture.

class graphnet.models.gnn.dynedge.DynEdge(*args, **kwargs)[source]

    Bases: GNN

    DynEdge (dynamical edge convolutional) model.

    Construct DynEdge.

    Parameters:
    • nb_inputs (int) – Number of input features on each node.
    • nb_neighbours (int, default: 8) – Number of neighbours to be used in the k-nearest-neighbour clustering which is performed after each (dynamical) edge convolution.
    • features_subset (Union[List[int], slice, None], default: None) – The subset of latent features on each node that are used as metric dimensions when performing the k-nearest-neighbours clustering. Defaults to [0,1,2].
    • dynedge_layer_sizes (Optional[List[Tuple[int, ...]]], default: None) – The layer sizes, or latent feature dimensions, used in the DynEdgeConv layers. Each entry in dynedge_layer_sizes corresponds to a single DynEdgeConv layer; the integers in the corresponding tuple correspond to the layer sizes in the multi-layer perceptron (MLP) that is applied within each DynEdgeConv layer. That is, a list of size-two tuples means that all DynEdgeConv layers contain a two-layer MLP. Defaults to [(128, 256), (336, 256), (336, 256), (336, 256)].
    • post_processing_layer_sizes (Optional[List[int]], default: None) – Hidden layer sizes in the MLP following the skip-concatenation of the outputs of each DynEdgeConv layer. Defaults to [336, 256].
    • readout_layer_sizes (Optional[List[int]], default: None) – Hidden layer sizes in the MLP following the post-processing _and_ optional global pooling. As this is the last layer(s) in the model, the last layer in the read-out yields the output of the DynEdge model. Defaults to [128,].
    • global_pooling_schemes (Union[str, List[str], None], default: None) – The list of global pooling schemes to use. Options are: "min", "max", "mean", and "sum".
    • add_global_variables_after_pooling (bool, default: False) – Whether to add global variables after global pooling. The alternative is to add (distribute) them to the individual nodes before any convolutional operations.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    forward(data)[source]

        Apply learnable forward pass.

        Parameters: data (Data) –

        Return type: Tensor
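
A construction sketch using the documented defaults; the re-export from graphnet.models.gnn is an assumption:

    from graphnet.models.gnn import DynEdge  # else: from graphnet.models.gnn.dynedge import DynEdge

    backbone = DynEdge(
        nb_inputs=4,   # node features, e.g. x, y, z, time
        nb_neighbours=8,
        global_pooling_schemes=["min", "max", "mean", "sum"],
    )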
diff --git a/api/graphnet.models.gnn.dynedge_jinst.html b/api/graphnet.models.gnn.dynedge_jinst.html
index 562835544..a006285ec 100644
--- a/api/graphnet.models.gnn.dynedge_jinst.html
+++ b/api/graphnet.models.gnn.dynedge_jinst.html
dynedge_jinst

Implementation of the exact DynEdge architecture used in [2209.03042].

Author: Rasmus Oersoe

class graphnet.models.gnn.dynedge_jinst.DynEdgeJINST(*args, **kwargs)[source]

    Bases: GNN

    DynEdge (dynamical edge convolutional) model used in [2209.03042].

    Construct DynEdgeJINST.

    Parameters:
    • nb_inputs (int) – Number of input features.
    • nb_outputs – Number of output features.
    • layer_size_scale (int, default: 4) – Integer that scales the size of hidden layers.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    forward(data)[source]

        Apply learnable forward pass.

        Parameters: data (Data) –

        Return type: Tensor
diff --git a/api/graphnet.models.gnn.dynedge_kaggle_tito.html b/api/graphnet.models.gnn.dynedge_kaggle_tito.html
index bf31a35ba..7231ceb1c 100644
--- a/api/graphnet.models.gnn.dynedge_kaggle_tito.html
+++ b/api/graphnet.models.gnn.dynedge_kaggle_tito.html
dynedge_kaggle_tito

Implementation of the DynEdge architecture used in the 'IceCube - Neutrinos in Deep Ice: Reconstruct the direction of neutrinos from the Universe to the South Pole' Kaggle competition. Solution by TITO.

class graphnet.models.gnn.dynedge_kaggle_tito.DynEdgeTITO(*args, **kwargs)[source]

    Bases: GNN

    DynEdgeTITO (dynamical edge convolutional with Transformer) model.

    Construct DynEdgeTITO.

    Parameters:
    • nb_inputs (int) – Number of input features on each node.
    • features_subset (Optional[List[int]], default: None) – The subset of latent features on each node that are used as metric dimensions when performing the k-nearest-neighbours clustering. Defaults to [0,1,2,3].
    • dyntrans_layer_sizes (Optional[List[Tuple[int, ...]]], default: None) – The layer sizes, or latent feature dimensions, used in the DynTrans layers. Defaults to [(256, 256), (256, 256), (256, 256), (256, 256)].
    • global_pooling_schemes (List[str], default: ['max']) – The list of global pooling schemes to use. Options are: "min", "max", "mean", and "sum".
    • use_global_features (bool, default: True) – Whether to use global features after pooling.
    • use_post_processing_layers (bool, default: True) – Whether to use post-processing layers after the DynTrans layers.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    forward(data)[source]

        Apply learnable forward pass.

        Parameters: data (Data) –

        Return type: Tensor
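
A construction sketch mirroring the documented defaults; the re-export from graphnet.models.gnn is an assumption:

    from graphnet.models.gnn import DynEdgeTITO  # else: from graphnet.models.gnn.dynedge_kaggle_tito import DynEdgeTITO

    backbone = DynEdgeTITO(
        nb_inputs=4,
        dyntrans_layer_sizes=[(256, 256), (256, 256), (256, 256), (256, 256)],
        global_pooling_schemes=["max"],
    )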
diff --git a/api/graphnet.models.gnn.gnn.html b/api/graphnet.models.gnn.gnn.html
index 4d1518d3a..9391893f5 100644
--- a/api/graphnet.models.gnn.gnn.html
+++ b/api/graphnet.models.gnn.gnn.html
gnn

Base GNN-specific Model class(es).

class graphnet.models.gnn.gnn.GNN(*args, **kwargs)[source]

    Bases: Model

    Base class for all core GNN models in graphnet.

    Construct GNN.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    property nb_inputs: int

        Return number of input features.

    property nb_outputs: int

        Return number of output features.

    abstract forward(data)[source]

        Apply learnable forward pass in model.

        Parameters: data (Data) –

        Return type: Tensor
diff --git a/api/graphnet.models.gnn.html b/api/graphnet.models.gnn.html
index b84087b08..c92088621 100644
--- a/api/graphnet.models.gnn.html
+++ b/api/graphnet.models.gnn.html

gnn

GNN-specific modules, for performing the main learnable operations.

Submodules

diff --git a/api/graphnet.models.graphs.edges.edges.html b/api/graphnet.models.graphs.edges.edges.html
index c638b906f..b752b8a91 100644
--- a/api/graphnet.models.graphs.edges.edges.html
+++ b/api/graphnet.models.graphs.edges.edges.html
edges

Class(es) for building/connecting graphs.

class graphnet.models.graphs.edges.edges.EdgeDefinition(*args, **kwargs)[source]

    Bases: Model

    Base class for graph building.

    Construct Logger.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    forward(graph)[source]

        Construct edges based on a problem-specific implementation of `_construct_edges`.

        Parameters: graph (Data) – a graph without edges.

        Returns: a graph with edges.

        Return type: graph

class graphnet.models.graphs.edges.edges.KNNEdges(*args, **kwargs)[source]

    Bases: EdgeDefinition

    Builds edges from the k-nearest neighbours.

    K-NN edge definition. Will connect nodes together with their `nb_nearest_neighbours` nearest neighbours in the feature space given by `columns`.

    Parameters:
    • nb_nearest_neighbours (int) – Number of neighbours.
    • columns (List[int], default: [0, 1, 2]) – Node features to use for the distance calculation. Defaults to [0, 1, 2].
    • args (Any) –
    • kwargs (Any) –

    Return type: object
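
A minimal sketch of a k-NN edge definition over the spatial coordinates; the re-export from graphnet.models.graphs.edges is an assumption:

    from graphnet.models.graphs.edges import KNNEdges  # else: from graphnet.models.graphs.edges.edges import KNNEdges

    edge_definition = KNNEdges(
        nb_nearest_neighbours=8,  # connect each node to its 8 nearest neighbours
        columns=[0, 1, 2],        # distance computed in (x, y, z) feature space
    )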
class graphnet.models.graphs.edges.edges.RadialEdges(*args, **kwargs)[source]

    Bases: EdgeDefinition

    Builds a graph from a sphere of chosen radius centred at each node.

    Radial edges. Connects each node to the other nodes that are within a sphere of radius `r` centered at the node. The feature space of `r` is defined by `columns`.

    Parameters:
    • radius (float) – Radius of the sphere.
    • columns (List[int], default: [0, 1, 2]) – Columns of the node feature matrix used. Defaults to [0, 1, 2].
    • args (Any) –
    • kwargs (Any) –

    Return type: object

class graphnet.models.graphs.edges.edges.EuclideanEdges(*args, **kwargs)[source]

    Bases: EdgeDefinition

    Builds edges according to the Euclidean distance between nodes.

    See https://arxiv.org/pdf/1809.06166.pdf.

    Construct EuclideanEdges.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object
diff --git a/api/graphnet.models.graphs.edges.html b/api/graphnet.models.graphs.edges.html
index 55df9498a..cd05a6814 100644
--- a/api/graphnet.models.graphs.edges.html
+++ b/api/graphnet.models.graphs.edges.html

edges

Modules for constructing graphs.

`GraphDefinition` defines the nodes and their features, and contains general graph-manipulation. `EdgeDefinition` defines how edges are drawn between nodes and their features.

Submodules

diff --git a/api/graphnet.models.graphs.edges.minkowski.html b/api/graphnet.models.graphs.edges.minkowski.html
index b94c54396..1e75d8cc6 100644
--- a/api/graphnet.models.graphs.edges.minkowski.html
+++ b/api/graphnet.models.graphs.edges.minkowski.html
minkowski

Module containing EdgeDefinitions based on the Minkowski metric.

graphnet.models.graphs.edges.minkowski.compute_minkowski_distance_mat(x, y, c, space_coords, time_coord)[source]

    Compute all pairwise Minkowski distances.

    Parameters:
    • x (Tensor) – First tensor of shape (n, d).
    • y (Tensor) – Second tensor of shape (m, d).
    • c (float) – Speed of light, in scaled units.
    • space_coords (Optional[List[int]], default: None) – Indices of space coordinates.
    • time_coord (Optional[int], default: 3) – Index of time coordinate.

    Return type: Tensor

    Returns: Matrix of shape (n, m) of all pairwise Minkowski distances.
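
A small usage sketch. The coordinate values are toy data; "scaled units" means c must be consistent with the spatial and time units of the input:

    import torch
    from graphnet.models.graphs.edges.minkowski import compute_minkowski_distance_mat

    pulses = torch.tensor([
        [0.0, 0.0, 0.0, 0.0],   # (x, y, z, t) per pulse
        [1.0, 0.0, 0.0, 1.0],
        [0.0, 2.0, 0.0, 1.0],
    ])
    dist = compute_minkowski_distance_mat(
        pulses, pulses, c=1.0, space_coords=[0, 1, 2], time_coord=3
    )
    print(dist.shape)  # torch.Size([3, 3])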
class graphnet.models.graphs.edges.minkowski.MinkowskiKNNEdges(*args, **kwargs)[source]

    Bases: EdgeDefinition

    Builds edges between the most light-like separated nodes.

    Initialize MinkowskiKNNEdges.

    Parameters:
    • nb_nearest_neighbours (int) – Number of neighbours to connect to.
    • c (float) – Speed of light, in scaled units.
    • time_like_weight (float, default: 1.0) – Preference for time-like over space-like edges. Scales time-like distances by this value before finding nearest neighbours.
    • space_coords (Optional[List[int]], default: None) – Coordinates of x, y, z.
    • time_coord (Optional[int], default: 3) – Coordinate of time.
    • args (Any) –
    • kwargs (Any) –

    Return type: object
diff --git a/api/graphnet.models.graphs.graph_definition.html b/api/graphnet.models.graphs.graph_definition.html
index 85be967ee..3d62ee917 100644
--- a/api/graphnet.models.graphs.graph_definition.html
+++ b/api/graphnet.models.graphs.graph_definition.html
graph_definition

Modules for defining graphs.

These are self-contained graph definitions that hold all the graph-altering code in graphnet. These modules define what the GNN sees as input and can be passed to dataloaders during training and deployment.

class graphnet.models.graphs.graph_definition.GraphDefinition(*args, **kwargs)[source]

    Bases: Model

    An abstract class to create graph definitions from.

    Construct `GraphDefinition`. The `detector` holds `Detector`-specific code, e.g. scaling/standardization and geometry tables. `node_definition` defines the nodes in the graph. `edge_definition` defines the connectivity of the nodes in the graph.

    Parameters:
    • detector (Detector) – The corresponding `Detector` representing the data.
    • node_definition (NodeDefinition, default: NodesAsPulses()) – Definition of nodes. Defaults to NodesAsPulses.
    • edge_definition (Optional[EdgeDefinition], default: None) – Definition of edges. Defaults to None.
    • input_feature_names (Optional[List[str]], default: None) – Names of each column in the expected input data that will be built into a graph. If not provided, it is automatically assumed that all features in Detector are used.
    • dtype (Optional[dtype], default: torch.float32) – Data type used for node features, e.g. `torch.float`.
    • perturbation_dict (Optional[Dict[str, float]], default: None) – Dictionary mapping a feature name to a standard deviation according to which the values for this feature should be randomly perturbed. Defaults to None.
    • seed (Union[int, Generator, None], default: None) – Seed or Generator used to randomly sample perturbations. Defaults to None.
    • add_inactive_sensors (bool, default: False) – If True, inactive sensors will be appended to the graph with padded pulse information. Defaults to False.
    • sensor_mask (Optional[List[int]], default: None) – A list of sensor ids to be masked from the graph. Any sensor listed here will be removed from the graph. Defaults to None.
    • string_mask (Optional[List[int]], default: None) – A list of string ids to be masked from the graph. Defaults to None.
    • sort_by (Optional[str], default: None) – Name of the node feature to sort by. Defaults to None.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    forward(input_features, input_feature_names, truth_dicts, custom_label_functions, loss_weight_column, loss_weight, loss_weight_default_value, data_path)[source]

        Construct graph as `Data` object.

        Parameters:
        • input_features (ndarray) – Input features for graph construction. Shape `[num_rows, d]`.
        • input_feature_names (List[str]) – Name of each column. Shape `[, d]`.
        • truth_dicts (Optional[List[Dict[str, Any]]], default: None) – Dictionary containing truth labels.
        • custom_label_functions (Optional[Dict[str, Callable[..., Any]]], default: None) – Custom label functions. See https://github.com/graphnet-team/graphnet/blob/main/GETTING_STARTED.md#adding-custom-truth-labels.
        • loss_weight_column (Optional[str], default: None) – Name of the column that holds the loss weight. Defaults to None.
        • loss_weight (Optional[float], default: None) – Loss weight associated with the event. Defaults to None.
        • loss_weight_default_value (Optional[float], default: None) – Default value for the loss weight. Used in instances where some events have no pre-defined loss weight. Defaults to None.
        • data_path (Optional[str], default: None) – Path to dataset data files. Defaults to None.

        Returns: graph

        Return type: Data
diff --git a/api/graphnet.models.graphs.graphs.html b/api/graphnet.models.graphs.graphs.html
index 81c36ff78..c42c6b03e 100644
--- a/api/graphnet.models.graphs.graphs.html
+++ b/api/graphnet.models.graphs.graphs.html
graphs

A module containing different graph representations in GraphNeT.

class graphnet.models.graphs.graphs.KNNGraph(*args, **kwargs)[source]

    Bases: GraphDefinition

    A graph representation where edges are drawn to the nearest neighbours.

    Construct k-nn graph representation.

    Parameters:
    • detector (Detector) – Detector that represents your data.
    • node_definition (Optional[NodeDefinition], default: None) – Definition of nodes in the graph.
    • input_feature_names (Optional[List[str]], default: None) – Name of input feature columns.
    • dtype (Optional[dtype], default: torch.float32) – Data type for node features.
    • perturbation_dict (Optional[Dict[str, float]], default: None) – Dictionary mapping a feature name to a standard deviation according to which the values for this feature should be randomly perturbed. Defaults to None.
    • seed (Union[int, Generator, None], default: None) – Seed or Generator used to randomly sample perturbations. Defaults to None.
    • nb_nearest_neighbours (int, default: 8) – Number of edges for each node. Defaults to 8.
    • columns (List[int], default: [0, 1, 2]) – Node feature columns used for the distance calculation. Defaults to [0, 1, 2].
    • args (Any) –
    • kwargs (Any) –

    Return type: object
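
A construction sketch for an IceCube-like detector. The concrete Detector class (IceCube86) and the re-export from graphnet.models.graphs are assumptions; any graphnet Detector subclass would take its place:

    from graphnet.models.detector.icecube import IceCube86  # assumed detector class
    from graphnet.models.graphs import KNNGraph

    graph_definition = KNNGraph(
        detector=IceCube86(),
        nb_nearest_neighbours=8,  # 8 edges per node
        columns=[0, 1, 2],        # k-NN distance in (x, y, z)
    )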
diff --git a/api/graphnet.models.graphs.html b/api/graphnet.models.graphs.html
index 8e26eaac0..dcaf44d79 100644
--- a/api/graphnet.models.graphs.html
+++ b/api/graphnet.models.graphs.html

graphs

Modules for constructing graphs.

`GraphDefinition` defines the nodes and their features, and contains general graph-manipulation. `EdgeDefinition` defines how edges are drawn between nodes and their features.

Subpackages

diff --git a/api/graphnet.models.graphs.nodes.html b/api/graphnet.models.graphs.nodes.html
index 3796f586d..392dc3d0e 100644
--- a/api/graphnet.models.graphs.nodes.html
+++ b/api/graphnet.models.graphs.nodes.html

nodes

Modules for constructing graphs.

`GraphDefinition` defines the nodes and their features, and contains general graph-manipulation. `EdgeDefinition` defines how edges are drawn between nodes and their features.

Submodules

diff --git a/api/graphnet.models.graphs.nodes.nodes.html b/api/graphnet.models.graphs.nodes.nodes.html
index 69cbf1385..f3a2d6e1c 100644
--- a/api/graphnet.models.graphs.nodes.nodes.html
+++ b/api/graphnet.models.graphs.nodes.nodes.html
nodes

Class(es) for building/connecting graphs.

class graphnet.models.graphs.nodes.nodes.NodeDefinition(*args, **kwargs)[source]

    Bases: Model

    Base class for graph building.

    Construct Detector.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    forward(x)[source]

        Construct nodes from raw node features.

        Parameters:
        • x (tensor) – Standardized node features with shape `[num_pulses, d]`, where `d` is the number of node features.
        • node_feature_names – List of names for each column in `x`.

        Returns:
            graph: a graph without edges.
            new_features_name: List of new feature names.

        Return type: graph

    property nb_outputs: int

        Return number of output features.

        This is the default, but it may be overridden by specific inheriting classes.

    set_number_of_inputs(input_feature_names)[source]

        Return number of inputs expected by node definition.

        Parameters: input_feature_names (List[str]) – Name of each input feature column.

        Return type: None

    set_output_feature_names(input_feature_names)[source]

        Set output feature names as a member variable.

        Parameters: input_feature_names (List[str]) – List of column names of the input to the node definition.

        Return type: None

class graphnet.models.graphs.nodes.nodes.NodesAsPulses(*args, **kwargs)[source]

    Bases: NodeDefinition

    Represent each measured pulse of Cherenkov radiation as a node.

    Construct Detector.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

class graphnet.models.graphs.nodes.nodes.PercentileClusters(*args, **kwargs)[source]

    Bases: NodeDefinition

    Represent nodes as clusters with percentile summary node features.

    If cluster_on is set to the xyz coordinates of DOMs, e.g. cluster_on = ['dom_x', 'dom_y', 'dom_z'], each node will be a unique DOM, and the pulse information (charge, time) is summarized using percentiles.

    Construct PercentileClusters.

    Parameters:
    • cluster_on (List[str]) – Names of features to create clusters from.
    • percentiles (List[int]) – List of percentiles. E.g. [10, 50, 90].
    • add_counts (bool, default: True) – If True, the number of duplicates is added to the output array.
    • input_feature_names (Optional[List[str]], default: None) – (Optional) column names for input features.
    • args (Any) –
    • kwargs (Any) –

    Return type: object
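
A construction sketch for DOM-level nodes with percentile-summarised pulse features; the feature names are illustrative IceCube-style columns, and the re-export from graphnet.models.graphs.nodes is an assumption:

    from graphnet.models.graphs.nodes import PercentileClusters  # else: from graphnet.models.graphs.nodes.nodes import PercentileClusters

    node_definition = PercentileClusters(
        cluster_on=["dom_x", "dom_y", "dom_z"],  # one node per unique DOM position
        percentiles=[10, 50, 90],                # summarise remaining features per node
        add_counts=True,                         # append pulse multiplicity
    )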

diff --git a/api/graphnet.models.graphs.utils.html b/api/graphnet.models.graphs.utils.html
index 779b94300..cd741c019 100644
--- a/api/graphnet.models.graphs.utils.html
+++ b/api/graphnet.models.graphs.utils.html
utils

Utility functions for construction of graphs.

graphnet.models.graphs.utils.lex_sort(x, cluster_columns)[source]

    Sort numpy arrays according to the columns in `cluster_columns`.

    Note that x is sorted along the dimensions in cluster_columns backwards, i.e. cluster_columns = [0,1,2] means x is sorted along [2,1,0].

    Parameters:
    • x (array) – Array to be sorted.
    • cluster_columns (List[int]) – Columns of x to be sorted along.

    Returns: A sorted version of x.

    Return type: ndarray
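
A small usage sketch with toy data, assuming only the documented "backwards" column order:

    import numpy as np
    from graphnet.models.graphs.utils import lex_sort

    x = np.array([
        [2.0, 1.0, 9.0],
        [1.0, 1.0, 3.0],
        [1.0, 0.0, 5.0],
    ])
    x_sorted = lex_sort(x, cluster_columns=[0, 1, 2])
    # Per the docstring, rows are effectively sorted along columns [2, 1, 0].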
graphnet.models.graphs.utils.gather_cluster_sequence(x, feature_idx, cluster_columns)[source]

    Turn x into rows of clusters with sequences along columns.

    Sequences along columns are added which correspond to gathered sequences of the feature in x specified by column index feature_idx associated with each cluster. Sequences are padded with NaN to be of the same length. The dimension of the clustered array is [n_clusters, l + len(cluster_columns)], where l is the largest sequence length.

    Example: Suppose x represents a neutrino event and we have chosen to cluster on the PMT positions, and that feature_idx corresponds to pulse time.

    The resulting array will have dimensions [n_pmts, m + 3], where m is the maximum number of same-PMT pulses found in x, and `+3` for the three spatial directions defining each cluster.

    Parameters:
    • x (ndarray) – Array for clustering.
    • feature_idx (int) – Index of the feature in x to be gathered for each cluster.
    • cluster_columns (List[int]) – Indices in x from which to build clusters.

    Returns:
        array: Array with dimensions [n_clusters, l + len(cluster_columns)].
        column_offset: Indices of the columns in array that define clusters.

    Return type: array

graphnet.models.graphs.utils.identify_indices(feature_names, cluster_on)[source]

    Identify indices for clustering and summarization.

    Parameters:
    • feature_names (List[str]) –
    • cluster_on (List[str]) –

    Return type: Tuple[List[int], List[int], List[str]]

graphnet.models.graphs.utils.cluster_summarize_with_percentiles(x, summarization_indices, cluster_indices, percentiles, add_counts)[source]

    Turn x into clusters with percentile summary.

    From the variables specified by the column indices cluster_indices, x is turned into clusters. Information in the columns of x specified by the indices summarization_indices within each cluster is summarized using percentiles. It is assumed that x represents a single event.

    Example use-case: Suppose x contains raw pulses from a neutrino event where some DOMs have multiple measurements of Cherenkov radiation. If cluster_indices is set to the columns corresponding to the xyz-position of the DOMs, and the features specified in summarization_indices correspond to time and charge, then each row in the returned array will correspond to a DOM, and the time and charge for each DOM will be summarized by percentiles. The returned output array has dimensions [n_clusters, len(percentiles)*len(summarization_indices) + len(cluster_indices)].

    Parameters:
    • x (ndarray) – Array to be clustered.
    • summarization_indices (List[int]) – List of column indices that define the features that will be summarized with percentiles.
    • cluster_indices (List[int]) – List of column indices on which the clusters are constructed.
    • percentiles (List[int]) – Percentiles used to summarize x. E.g. [10,50,90].
    • add_counts (bool) –

    Returns: Percentile-summarized array.

    Return type: ndarray
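
A sketch of the use-case described above; the column layout (xyz in columns 0-2, time and charge in columns 3-4) is an assumption for illustration:

    import numpy as np
    from graphnet.models.graphs.utils import cluster_summarize_with_percentiles

    x = np.random.rand(10, 5)   # 10 pulses from one event
    x[:5, 0:3] = 0.0            # first five pulses share one DOM position

    summary = cluster_summarize_with_percentiles(
        x,
        summarization_indices=[3, 4],  # summarise time and charge
        cluster_indices=[0, 1, 2],     # cluster on xyz
        percentiles=[10, 50, 90],
        add_counts=True,
    )
    # One row per unique xyz position: the cluster columns, followed by
    # 3 percentiles x 2 features, plus a count column when add_counts=True.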
diff --git a/api/graphnet.models.html b/api/graphnet.models.html
index 6ef893d4d..473d93200 100644
--- a/api/graphnet.models.html
+++ b/api/graphnet.models.html

models

Modules for configuring and building models.

graphnet.models allows for configuring and building complex GNN models using simple, physics-oriented components. This module provides modular components subclassing torch.nn.Module, meaning that users only need to import a few existing, purpose-built components and chain them together to form a complete GNN.

Subpackages

diff --git a/api/graphnet.models.standard_model.html b/api/graphnet.models.standard_model.html
index ed04dde69..a0e813fe9 100644
--- a/api/graphnet.models.standard_model.html
+++ b/api/graphnet.models.standard_model.html
standard_model

Standard model class(es).

class graphnet.models.standard_model.StandardModel(*args, **kwargs)[source]

    Bases: Model

    Main class for standard models in graphnet.

    This class chains together the different elements of a complete GNN-based model (detector read-in, GNN backbone, and task-specific read-outs).

    Construct StandardModel.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    fit(train_dataloader, val_dataloader, *, max_epochs, early_stopping_patience, gpus, callbacks, ckpt_path, logger, log_every_n_steps, gradient_clip_val, distribution_strategy, **trainer_kwargs)[source]

        Fit StandardModel using pytorch_lightning.Trainer.

        Parameters:
        • train_dataloader (DataLoader) –
        • val_dataloader (DataLoader | None) –
        • max_epochs (int) –
        • early_stopping_patience (int) –
        • gpus (List[int] | int | None) –
        • callbacks (List[Callback] | None) –
        • ckpt_path (str | None) –
        • logger (Logger | None) –
        • log_every_n_steps (int) –
        • gradient_clip_val (float | None) –
        • distribution_strategy (str | None) –
        • trainer_kwargs (Any) –

        Return type: None

    property target_labels: List[str]

        Return target labels.

    property prediction_labels: List[str]

        Return prediction labels.

    configure_optimizers()[source]

        Configure the model's optimizer(s).

        Return type: Dict[str, Any]

    forward(data)[source]

        Forward pass, chaining model components.

        Parameters: data (Data | List[Data]) –

        Return type: List[Union[Tensor, Data]]

    shared_step(batch, batch_idx)[source]

        Perform shared step.

        Applies the forward pass and the following loss calculation, shared between the training and validation step.

        Parameters:
        • batch (List[Data]) –
        • batch_idx (int) –

        Return type: Tensor

    training_step(train_batch, batch_idx)[source]

        Perform training step.

        Parameters:
        • train_batch (Data | List[Data]) –
        • batch_idx (int) –

        Return type: Tensor

    validation_step(val_batch, batch_idx)[source]

        Perform validation step.

        Parameters:
        • val_batch (Data | List[Data]) –
        • batch_idx (int) –

        Return type: Tensor

    compute_loss(preds, data, verbose)[source]

        Compute and sum losses across tasks.

        Parameters:
        • preds (Tensor) –
        • data (List[Data]) –
        • verbose (bool) –

        Return type: Tensor

    inference()[source]

        Activate inference mode.

        Return type: None

    train(mode)[source]

        Deactivate inference mode.

        Parameters: mode (bool) –

        Return type: Model

    predict(dataloader, gpus, distribution_strategy)[source]

        Return predictions for dataloader.

        Parameters:
        • dataloader (DataLoader) –
        • gpus (List[int] | int | None) –
        • distribution_strategy (str | None) –

        Return type: List[Tensor]

    predict_as_dataframe(dataloader, prediction_columns, *, additional_attributes, gpus, distribution_strategy)[source]

        Return predictions for dataloader as a DataFrame.

        Include additional_attributes as additional columns in the output DataFrame.

        Parameters:
        • dataloader (DataLoader) –
        • prediction_columns (List[str] | None) –
        • additional_attributes (List[str] | None) –
        • gpus (List[int] | int | None) –
        • distribution_strategy (str | None) –

        Return type: DataFrame
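
A minimal end-to-end sketch chaining graph definition, backbone, and task into a StandardModel. The constructor keyword names (graph_definition, gnn, tasks), the detector class, the loss class, and the dataloaders are assumptions for illustration; nb_inputs must match the number of node features the graph definition produces:

    from graphnet.models import StandardModel
    from graphnet.models.gnn import DynEdge
    from graphnet.models.graphs import KNNGraph
    from graphnet.models.detector.icecube import IceCube86     # assumed detector class
    from graphnet.models.task.reconstruction import EnergyReconstruction
    from graphnet.training.loss_functions import LogCoshLoss   # assumed loss class

    graph_definition = KNNGraph(detector=IceCube86())
    backbone = DynEdge(nb_inputs=4, global_pooling_schemes=["min", "max", "mean", "sum"])
    task = EnergyReconstruction(
        hidden_size=backbone.nb_outputs,
        target_labels=["energy"],
        loss_function=LogCoshLoss(),
    )
    model = StandardModel(graph_definition=graph_definition, gnn=backbone, tasks=[task])

    # train_dataloader / val_dataloader are assumed graphnet DataLoaders.
    model.fit(train_dataloader, val_dataloader, max_epochs=10, gpus=[0])
    df = model.predict_as_dataframe(val_dataloader, additional_attributes=["event_no"])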
diff --git a/api/graphnet.models.task.classification.html b/api/graphnet.models.task.classification.html
index 7692bc6e2..09c86ff7d 100644
--- a/api/graphnet.models.task.classification.html
+++ b/api/graphnet.models.task.classification.html
classification

Classification-specific Model class(es).

class graphnet.models.task.classification.MulticlassClassificationTask(*args, **kwargs)[source]

    Bases: IdentityTask

    General task for classifying any number of classes.

    Requires the same number of input features as the number of classes being predicted. Returns the untransformed latent features, which are interpreted as the logits for each class being classified.

    Construct IdentityTask. A task that does not apply a learned embedding to the input. It returns the direct inputs from Model.

    Parameters:
    • args (Any) –
    • kwargs (Any) –

    Return type: object

class graphnet.models.task.classification.BinaryClassificationTask(*args, **kwargs)[source]

    Bases: StandardLearnedTask

    Performs binary classification.

    Construct StandardLearnedTask.

    Parameters:
    • hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    nb_inputs = 1

    default_target_labels = ['target']

    default_prediction_labels = ['target_pred']

class graphnet.models.task.classification.BinaryClassificationTaskLogits(*args, **kwargs)[source]

    Bases: StandardLearnedTask

    Performs binary classification from logits.

    Construct StandardLearnedTask.

    Parameters:
    • hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    nb_inputs = 1

    default_target_labels = ['target']

    default_prediction_labels = ['target_pred']
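
A construction sketch for a binary signal/noise read-out; the loss class (BinaryCrossEntropyLoss) and its import path are assumptions:

    from graphnet.models.task.classification import BinaryClassificationTask
    from graphnet.training.loss_functions import BinaryCrossEntropyLoss  # assumed loss class

    task = BinaryClassificationTask(
        hidden_size=128,              # must equal the backbone's nb_outputs
        target_labels=["target"],     # the documented default label
        loss_function=BinaryCrossEntropyLoss(),
    )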
diff --git a/api/graphnet.models.task.html b/api/graphnet.models.task.html
index d5d882f9e..3ac7f9967 100644
--- a/api/graphnet.models.task.html
+++ b/api/graphnet.models.task.html

task

Physics task-specific modules to be used as model "read-outs".

Submodules

diff --git a/api/graphnet.models.task.reconstruction.html b/api/graphnet.models.task.reconstruction.html
index 8b5c8d1d6..2b4943ac1 100644
--- a/api/graphnet.models.task.reconstruction.html
+++ b/api/graphnet.models.task.reconstruction.html
reconstruction

Reconstruction-specific Model class(es).

class graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa(*args, **kwargs)[source]

    Bases: StandardLearnedTask

    Reconstructs azimuthal angle and associated kappa (1/var).

    Construct StandardLearnedTask.

    Parameters:
    • hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    default_target_labels = ['azimuth']

    default_prediction_labels = ['azimuth_pred', 'azimuth_kappa']

    nb_inputs = 2

class graphnet.models.task.reconstruction.AzimuthReconstruction(*args, **kwargs)[source]

    Bases: AzimuthReconstructionWithKappa

    Reconstructs azimuthal angle.

    Construct StandardLearnedTask.

    Parameters:
    • hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    default_target_labels = ['azimuth']

    default_prediction_labels = ['azimuth_pred']

    nb_inputs = 2

class graphnet.models.task.reconstruction.DirectionReconstructionWithKappa(*args, **kwargs)[source]

    Bases: StandardLearnedTask

    Reconstructs direction with kappa from the 3D-vMF distribution.

    Construct StandardLearnedTask.

    Parameters:
    • hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs.
    • args (Any) –
    • kwargs (Any) –

    Return type: object

    default_target_labels = ['direction']

    default_prediction_labels = ['dir_x_pred', 'dir_y_pred', 'dir_z_pred', 'direction_kappa']

    nb_inputs = 3
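
A construction sketch pairing this task with a backbone. The companion loss class (VonMisesFisher3DLoss) and its import path are assumptions:

    from graphnet.models.task.reconstruction import DirectionReconstructionWithKappa
    from graphnet.training.loss_functions import VonMisesFisher3DLoss  # assumed loss class

    task = DirectionReconstructionWithKappa(
        hidden_size=128,                 # must equal the backbone's nb_outputs
        target_labels=["direction"],
        loss_function=VonMisesFisher3DLoss(),
    )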
    -class graphnet.models.task.reconstruction.ZenithReconstruction(*args, **kwargs)[source]
    -

    Bases: StandardLearnedTask

    -

    Reconstructs zenith angle.

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
    • hidden_size (int) – The number of columns in the output of -the last latent layer of Model using this Task. -Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['zenith']
    -
    -
    -
    -default_prediction_labels = ['zenith_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.ZenithReconstructionWithKappa(*args, **kwargs)[source]
    -

    Bases: ZenithReconstruction

    -

    Reconstructs zenith angle and associated kappa (1/var).

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['zenith']
    -
    -
    -
    -default_prediction_labels = ['zenith_pred', 'zenith_kappa']
    -
    -
    -
    -nb_inputs = 2
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.EnergyReconstruction(*args, **kwargs)[source]
    -

    Bases: StandardLearnedTask

    -

    Reconstructs energy using stable method.

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['energy']
    -
    -
    -
    -default_prediction_labels = ['energy_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.EnergyReconstructionWithPower(*args, **kwargs)[source]
    -

    Bases: StandardLearnedTask

    -

    Reconstructs energy.

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['energy']
    -
    -
    -
    -default_prediction_labels = ['energy_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.EnergyTCReconstruction(*args, **kwargs)[source]
    -

    Bases: StandardLearnedTask

    -

    Reconstructs track and cascade energies using stable method.

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['energy_track', 'energy_cascade']
    -
    -
    -
    -default_prediction_labels = ['energy_track_pred', 'energy_cascade_pred']
    -
    -
    -
    -nb_inputs = 2
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty(*args, **kwargs)[source]
    -

    Bases: EnergyReconstruction

    -

    Reconstructs energy and associated uncertainty (log(var)).

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['energy']
    -
    -
    -
    -default_prediction_labels = ['energy_pred', 'energy_sigma']
    -
    -
    -
    -nb_inputs = 2
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.VertexReconstruction(*args, **kwargs)[source]
    -

    Bases: StandardLearnedTask

    -

    Reconstructs vertex position and time.

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['vertex']
    -
    -
    -
    -default_prediction_labels = ['position_x_pred', 'position_y_pred', 'position_z_pred', 'interaction_time_pred']
    -
    -
    -
    -nb_inputs = 4
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.PositionReconstruction(*args, **kwargs)[source]
    -

    Bases: StandardLearnedTask

    -

    Reconstructs vertex position.

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['position']
    -
    -
    -
    -default_prediction_labels = ['position_x_pred', 'position_y_pred', 'position_z_pred']
    -
    -
    -
    -nb_inputs = 3
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.TimeReconstruction(*args, **kwargs)[source]
    -

    Bases: StandardLearnedTask

    -

    Reconstructs time.

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['interaction_time']
    -
    -
    -
    -default_prediction_labels = ['interaction_time_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    -
    -
    -class graphnet.models.task.reconstruction.InelasticityReconstruction(*args, **kwargs)[source]
    -

    Bases: StandardLearnedTask

    -

    Reconstructs interaction inelasticity.

    -

That is, 1 - (track energy / total energy).

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -default_target_labels = ['inelasticity']
    -
    -
    -
    -default_prediction_labels = ['inelasticity_pred']
    -
    -
    -
    -nb_inputs = 1
    -
    -
    +
    +

    reconstruction
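A minimal usage sketch of the reconstruction tasks documented above, assuming a standard GraphNeT install; hidden_size=128 and the 2D von Mises-Fisher loss are illustrative choices, not requirements:

    from graphnet.models.task.reconstruction import ZenithReconstructionWithKappa
    from graphnet.training.loss_functions import VonMisesFisher2DLoss

    # hidden_size must equal Model.nb_outputs of the preceding GNN;
    # 128 is an arbitrary example value.
    task = ZenithReconstructionWithKappa(
        hidden_size=128,
        loss_function=VonMisesFisher2DLoss(),
    )
    print(task.default_prediction_labels)  # ['zenith_pred', 'zenith_kappa']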

    diff --git a/api/graphnet.models.task.task.html b/api/graphnet.models.task.task.html index 1c6b88a25..c6347337d 100644 --- a/api/graphnet.models.task.task.html +++ b/api/graphnet.models.task.task.html @@ -378,215 +378,11 @@ - -
  • @@ -662,64 +458,7 @@ @@ -729,302 +468,8 @@
    -
    -

    task

    -

    Base physics task-specific Model class(es).

    -
    -
    -class graphnet.models.task.task.Task(*args, **kwargs)[source]
    -

    Bases: Model

    -

    Base class for Tasks in GraphNeT.

    -

    Construct Task.

    -
    -
    Parameters:
    -
      -
    • loss_function (LossFunction) – Loss function appropriate to the task.

    • -
• target_labels (Union[str, List[str], None], default: None) – Name(s) of the quantity/-ies being predicted, used to extract the target tensor(s) from the Data object in .compute_loss(…).

    • -
• prediction_labels (Union[str, List[str], None], default: None) – The name(s) of each column that is predicted by the model during inference. If not given, the name will automatically be set to target_label + _pred.

    • -
• transform_prediction_and_target (Optional[Callable], default: None) – Optional function to transform both the predicted and target tensor before passing them to the loss function. Useful e.g. for having the model predict quantities on a physical scale, but transforming this scale to O(1) for a numerically stable loss computation.

    • -
• transform_target (Optional[Callable], default: None) – Optional function to transform only the target tensor before passing it, and the predicted tensor, to the loss function. Useful e.g. for having the model predict a transformed version of the target quantity, e.g. the log10-scaled energy, rather than the physical quantity itself. Used in conjunction with transform_inference to perform the inverse transform on the predicted quantity to recover the physical scale.

    • -
• transform_inference (Optional[Callable], default: None) – Optional function to inverse-transform the model prediction to recover a physical scale. Used in conjunction with transform_target.

    • -
• transform_support (Optional[Tuple], default: None) – Optional tuple to specify minimum and maximum of the range of validity for the inverse transforms transform_target and transform_inference in case this is restricted. By default the invertibility of transform_target is tested on the range [-1e6, 1e6].

    • -
• loss_weight (Optional[str], default: None) – Name of the attribute in data containing per-event loss weights.

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -abstract property nb_inputs: int
    -

    Return number of inputs assumed by task.

    -
    -
    -
    -property default_target_labels: List[str]
    -

    Return default target labels.

    -
    -
    -
    -property default_prediction_labels: List[str]
    -

    Return default prediction labels.

    -
    -
    -
    -inference()[source]
    -

    Activate inference mode.

    -
    -
    Return type:
    -

    None

    -
    -
    -
    -
    -
    -train_eval()[source]
    -

    Deactivate inference mode.

    -
    -
    Return type:
    -

    None

    -
    -
    -
    -
    -
    -
    -class graphnet.models.task.task.LearnedTask(*args, **kwargs)[source]
    -

    Bases: Task

    -

    Task class with a learned mapping.

    -

Applies a learned mapping between the last latent layer of Model and target space. E.g. the LearnedTask contains learnable parameters that act like a prediction head.

    -

    Construct LearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -abstract compute_loss(pred, data)[source]
    -

Compute loss of pred w.r.t. target labels in data.

-

    -
    -
    Return type:
    -

    Tensor

    -
    -
    Parameters:
    -
      -
    • pred (Tensor | Data) –

    • -
    • data (Data) –

    • -
    -
    -
    -
    -
    -
    -abstract property nb_inputs: int
    -

    Return number of inputs assumed by task.

    -
    -
    -
    -forward(x)[source]
    -

    Forward call for LearnedTask.

    -

The learned embedding transforms the last latent layer of Model to meet the target dimensions.

    -
    -
    Return type:
    -

    Union[Tensor, Data]

    -
    -
    Parameters:
    -

    x (Tensor | Data) –

    -
    -
    -
    -
    -
    -
    -class graphnet.models.task.task.StandardLearnedTask(*args, **kwargs)[source]
    -

    Bases: LearnedTask

    -

    Standard class for classification and reconstruction in GraphNeT.

    -

This class comes with a definition of compute_loss that is compatible with the vast majority of supervised learning tasks.

    -

    Construct StandardLearnedTask.

    -
    -
    Parameters:
    -
      -
• hidden_size (int) – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -abstract property nb_inputs: int
    -

    Return number of inputs assumed by task.

    -
    -
    -
    -compute_loss(pred, data)[source]
    -

    Compute supervised learning loss.

    -
    -
    Return type:
    -

    Tensor

    -
    -
    Parameters:
    -
      -
    • pred (Tensor | Data) –

    • -
    • data (Data) –

    • -
    -
    -
    -

Grabs truth labels in data and sends both pred and target to the loss function for evaluation. Suits most supervised learning `Task`s.

    -
    -
    -
    -
    -class graphnet.models.task.task.IdentityTask(*args, **kwargs)[source]
    -

    Bases: StandardLearnedTask

    -

    Identity, or trivial, task.

    -

    Construct IdentityTask.

    -

A task that does not apply a learned embedding to the input. It returns the direct inputs from Model.

    -
    -
    Parameters:
    -
      -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -property default_target_labels: List[str]
    -

    Return default target labels.

    -
    -
    -
    -property default_prediction_labels: List[str]
    -

    Return default prediction labels.

    -
    -
    -
    -property nb_inputs: int
    -

    Return number of inputs assumed by task.

    -
    -
    -
    -
    -class graphnet.models.task.task.StandardFlowTask(*args, **kwargs)[source]
    -

    Bases: Task

    -

    A Task for `NormalizingFlow`s in GraphNeT.

    -

Construct StandardFlowTask.

    -
    -
    Parameters:
    -
      -
    • target_labels (List[str]) – A list of names for the targets of this Task.

    • -
• hidden_size – The number of columns in the output of the last latent layer of Model using this Task. Available through Model.nb_outputs

    • -
    • args (Any) –

    • -
    • kwargs (Any) –

    • -
    -
    -
    Return type:
    -

    object

    -
    -
    -
    -
    -nb_inputs()[source]
    -

    Return number of inputs assumed by task.

    -
    -
    Return type:
    -

    int

    -
    -
    -
    -
    -
    -forward(x, jacobian)[source]
    -

    Forward pass.

    -
    -
    Return type:
    -

    Union[Tensor, Data]

    -
    -
    Parameters:
    -
      -
    • x (Tensor | Data) –

    • -
    • jacobian (Tensor | None) –

    • -
    -
    -
    -
    -
    -
    -compute_loss(prediction, jacobian, data)[source]
    -

    Compute loss for normalizing flow tasks.

    -
    -
    Parameters:
    -
      -
    • prediction (Tensor) – transformed sample in latent distribution space.

    • -
    • jacobian (Tensor) – the jacobian associated with the transformation.

    • -
    • data (Data) – the graph object.

    • -
    -
    -
    Return type:
    -

    Tensor

    -
    -
    Returns:
    -

    the loss associated with the transformation.

    -
    -
    -
    -
    +
    +

    task
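A minimal sketch of the transform_target/transform_inference hooks described above: training an energy regression on log10(E) for numerical stability while reporting predictions on the physical scale. The hidden_size and loss function below are illustrative assumptions:

    import torch
    from graphnet.models.task.reconstruction import EnergyReconstruction
    from graphnet.training.loss_functions import LogCoshLoss

    task = EnergyReconstruction(
        hidden_size=128,                      # assumed to match Model.nb_outputs
        loss_function=LogCoshLoss(),
        transform_target=torch.log10,         # loss is computed on log10(E)
        transform_inference=lambda x: 10**x,  # inverse transform back to physical units
    )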

    diff --git a/api/graphnet.models.utils.html b/api/graphnet.models.utils.html index 1499ab427..58f115973 100644 --- a/api/graphnet.models.utils.html +++ b/api/graphnet.models.utils.html @@ -386,43 +386,11 @@ - - @@ -468,18 +436,7 @@ @@ -489,69 +446,8 @@
    -
    -

    utils

    -

    Utility functions for graphnet.models.

    -
    -
    -graphnet.models.utils.calculate_xyzt_homophily(x, edge_index, batch)[source]
    -

    Calculate xyzt-homophily from a batch of graphs.

    -

Homophily is a graph scalar quantity that measures the likeness of variables in nodes. Notice that this calculator assumes a special order of input features in x.

    -
    -
    Return type:
    -

    Tuple[Tensor, Tensor, Tensor, Tensor]

    -
    -
    Returns:
    -

    Tuple, each element with shape [batch_size,1].

    -
    -
    Parameters:
    -
      -
    • x (Tensor) –

    • -
    • edge_index (LongTensor) –

    • -
    • batch (Batch) –

    • -
    -
    -
    -
    -
    -
    -graphnet.models.utils.calculate_distance_matrix(xyz_coords)[source]
    -

    Calculate the matrix of pairwise distances between pulses.

    -
    -
    Parameters:
    -

    xyz_coords (Tensor) – (x,y,z)-coordinates of pulses, of shape [nb_doms, 3].

    -
    -
    Return type:
    -

    Tensor

    -
    -
    Returns:
    -

    Matrix of pairwise distances, of shape [nb_doms, nb_doms]

    -
    -
    -
    -
    -
    -graphnet.models.utils.knn_graph_batch(batch, k, columns)[source]
    -

    Calculate k-nearest-neighbours with individual k for each batch event.

    -
    -
    Parameters:
    -
      -
    • batch (Batch) – Batch of events.

    • -
    • k (List[int]) – A list of k’s.

    • -
• columns (List[int]) – The columns of Data.x used for computing the distances. E.g., Data.x[:,[0,1,2]]

    • -
    -
    -
    Return type:
    -

    Batch

    -
    -
    Returns:
    -

    Returns the same batch of events, but with updated edges.

    -
    -
    -
    +
    +

    utils
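A minimal sketch of calculate_distance_matrix as documented above; the five random pulses are placeholder data:

    import torch
    from graphnet.models.utils import calculate_distance_matrix

    xyz = torch.rand(5, 3)                 # (x,y,z) per pulse, shape [nb_doms, 3]
    dist = calculate_distance_matrix(xyz)  # pairwise distances, shape [nb_doms, nb_doms]
    assert torch.allclose(dist, dist.T)    # symmetric, with zero diagonal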

    diff --git a/api/graphnet.training.callbacks.html b/api/graphnet.training.callbacks.html index 30f76fb50..64fa5e7e2 100644 --- a/api/graphnet.training.callbacks.html +++ b/api/graphnet.training.callbacks.html @@ -772,8 +772,8 @@
    Parameters:
      -
    • trainer (Trainer) – The trainer.

    • -
    • graphnet_model (Model) – The model.

    • +
    • trainer (pl.Trainer) – The trainer.

    • +
    • graphnet_model (Model) – The model.

    • stage (Optional[str], default: None) – The stage of training.

    @@ -789,8 +789,8 @@
    Parameters:
      -
    • trainer (Trainer) – Trainer object.

    • -
    • graphnet_model (Model) – Graphnet Model.

    • +
    • trainer (pl.Trainer) – Trainer object.

    • +
    • graphnet_model (Model) – Graphnet Model.

    Return type:
    @@ -806,8 +806,8 @@
    Parameters:
      -
    • trainer (Trainer) – Trainer object.

    • -
    • graphnet_model (Model) – Graphnet Model.

    • +
    • trainer (pl.Trainer) – Trainer object.

    • +
    • graphnet_model (Model) – Graphnet Model.

    Return type:
    @@ -823,8 +823,8 @@
    Parameters:
      -
    • trainer (Trainer) – Trainer object.

    • -
    • graphnet_model (Model) – Graphnet Model.

    • +
    • trainer (pl.Trainer) – Trainer object.

    • +
    • graphnet_model (Model) – Graphnet Model.

    Return type:
    diff --git a/api/graphnet.training.html b/api/graphnet.training.html index d33bd01fc..add559a4a 100644 --- a/api/graphnet.training.html +++ b/api/graphnet.training.html @@ -449,15 +449,7 @@
  • VonMisesFisher3DLoss
  • -
  • utils -
  • +
  • utils
  • weight_fitting
    • WeightFitter
    • Uniform
    • diff --git a/api/graphnet.training.utils.html b/api/graphnet.training.utils.html index a2ddce336..cd1922ff0 100644 --- a/api/graphnet.training.utils.html +++ b/api/graphnet.training.utils.html @@ -365,70 +365,11 @@ - -
    • @@ -467,24 +408,7 @@ @@ -494,143 +418,8 @@
      -
      -

      utils

      -

      Utility functions for graphnet.training.

      -
      -
      -graphnet.training.utils.collate_fn(graphs)[source]
      -

Remove graphs with fewer than two DOM hits.

      -

      Should not occur in “production”.

      -
      -
      Return type:
      -

      Batch

      -
      -
      Parameters:
      -

      graphs (List[Data]) –

      -
      -
      -
      -
      -
      -class graphnet.training.utils.collator_sequence_buckleting(batch_splits=[0.8])[source]
      -

      Bases: object

      -

      Perform the sequence bucketing for the graphs in the batch.

      -

      Set cutting points of the different mini-batches.

      -

batch_splits: list of floats, each element is the fraction of the total number of graphs. This list should not explicitly define the first and last elements, which will always be 0 and 1 respectively.

      -
      -
      Parameters:
      -

      batch_splits (List[float]) –

      -
      -
      -
      -
      -
      -graphnet.training.utils.make_dataloader(db, pulsemaps, graph_definition, features, truth, *, batch_size, shuffle, selection, num_workers, persistent_workers, node_truth, truth_table, node_truth_table, string_selection, loss_weight_table, loss_weight_column, index_column, labels)[source]
      -

      Construct DataLoader instance.

      -
      -
      Return type:
      -

      DataLoader

      -
      -
      Parameters:
      -
        -
      • db (str) –

      • -
      • pulsemaps (str | List[str]) –

      • -
      • graph_definition (GraphDefinition) –

      • -
      • features (List[str]) –

      • -
      • truth (List[str]) –

      • -
      • batch_size (int) –

      • -
      • shuffle (bool) –

      • -
      • selection (List[int] | None) –

      • -
      • num_workers (int) –

      • -
      • persistent_workers (bool) –

      • -
      • node_truth (List[str] | None) –

      • -
      • truth_table (str) –

      • -
      • node_truth_table (str | None) –

      • -
      • string_selection (List[int] | None) –

      • -
      • loss_weight_table (str | None) –

      • -
      • loss_weight_column (str | None) –

      • -
      • index_column (str) –

      • -
      • labels (Dict[str, Callable] | None) –

      • -
      -
      -
      -
      -
      -
      -graphnet.training.utils.make_train_validation_dataloader(db, graph_definition, selection, pulsemaps, features, truth, *, batch_size, database_indices, seed, test_size, num_workers, persistent_workers, node_truth, truth_table, node_truth_table, string_selection, loss_weight_column, loss_weight_table, index_column, labels)[source]
      -

      Construct train and test DataLoader instances.

      -
      -
      Return type:
      -

      Tuple[DataLoader, DataLoader]

      -
      -
      Parameters:
      -
        -
      • db (str) –

      • -
      • graph_definition (GraphDefinition) –

      • -
      • selection (List[int] | None) –

      • -
      • pulsemaps (str | List[str]) –

      • -
      • features (List[str]) –

      • -
      • truth (List[str]) –

      • -
      • batch_size (int) –

      • -
      • database_indices (List[int] | None) –

      • -
      • seed (int) –

      • -
      • test_size (float) –

      • -
      • num_workers (int) –

      • -
      • persistent_workers (bool) –

      • -
      • node_truth (str | None) –

      • -
      • truth_table (str) –

      • -
      • node_truth_table (str | None) –

      • -
      • string_selection (List[int] | None) –

      • -
      • loss_weight_column (str | None) –

      • -
      • loss_weight_table (str | None) –

      • -
      • index_column (str) –

      • -
      • labels (Dict[str, Callable] | None) –

      • -
      -
      -
      -
      -
      -
      -graphnet.training.utils.get_predictions(trainer, model, dataloader, prediction_columns, *, node_level, additional_attributes)[source]
      -

      Get model predictions on dataloader.

      -
      -
      Return type:
      -

      DataFrame

      -
      -
      Parameters:
      -
        -
      • trainer (Trainer) –

      • -
      • model (Model) –

      • -
      • dataloader (DataLoader) –

      • -
      • prediction_columns (List[str]) –

      • -
      • node_level (bool) –

      • -
      • additional_attributes (List[str] | None) –

      • -
      -
      -
      -
      -
      -
      -graphnet.training.utils.save_results(db, tag, results, archive, model)[source]
      -

      Save trained model and prediction results in db.

      -
      -
      Return type:
      -

      None

      -
      -
      Parameters:
      -
        -
      • db (str) –

      • -
      • tag (str) –

      • -
      • results (DataFrame) –

      • -
      • archive (str) –

      • -
      • model (Model) –

      • -
      -
      -
      -
      +
      +

      utils
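A minimal sketch of collate_fn as documented above, which batches a list of Data objects while dropping events with fewer than two DOM hits. Attaching n_pulses by hand here is an assumption for illustration, since in practice the graph definition sets it:

    import torch
    from torch_geometric.data import Data
    from graphnet.training.utils import collate_fn

    graphs = [
        Data(x=torch.rand(n, 4), n_pulses=torch.tensor(n)) for n in (1, 5, 8)
    ]
    batch = collate_fn(graphs)  # the single-hit event is filtered out
    print(batch.num_graphs)     # 2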

      diff --git a/api/graphnet.utilities.config.base_config.html b/api/graphnet.utilities.config.base_config.html index fe7430500..a43d9ac48 100644 --- a/api/graphnet.utilities.config.base_config.html +++ b/api/graphnet.utilities.config.base_config.html @@ -369,6 +369,8 @@
    • as_dict()
    • +
    • model_computed_fields +
    • model_config
    • model_fields @@ -407,6 +409,13 @@ as_dict() +
    • +
    • + + + model_computed_fields + +
    • @@ -554,6 +563,8 @@
    • as_dict()
    • +
    • model_computed_fields +
    • model_config
    • model_fields @@ -584,8 +595,7 @@

      Create a new model by parsing and validating input data from keyword arguments.

      Raises [ValidationError][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.

      -

__init__ uses __pydantic_self__ instead of the more common self for the first arg to allow self as a field name.

      +

      self is explicitly positional-only to allow self as a field name.

      @@ -626,6 +636,11 @@
  • +
    +model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}
    +

    A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

    +
    +
    model_config: ClassVar[ConfigDict] = {}

    Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

    diff --git a/api/graphnet.utilities.config.dataset_config.html b/api/graphnet.utilities.config.dataset_config.html index 80a4de048..524f74d53 100644 --- a/api/graphnet.utilities.config.dataset_config.html +++ b/api/graphnet.utilities.config.dataset_config.html @@ -409,6 +409,8 @@
  • as_dict()
  • +
  • model_computed_fields +
  • model_config
  • model_fields @@ -542,6 +544,13 @@ as_dict() +
  • +
  • + + + model_computed_fields + +
  • @@ -715,6 +724,8 @@
  • as_dict()
  • +
  • model_computed_fields +
  • model_config
  • model_fields @@ -913,6 +924,11 @@
  • +
    +model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}
    +

    A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

    +
    +
    model_config: ClassVar[ConfigDict] = {}

    Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

    diff --git a/api/graphnet.utilities.config.model_config.html b/api/graphnet.utilities.config.model_config.html index b49f3c48e..39894fd70 100644 --- a/api/graphnet.utilities.config.model_config.html +++ b/api/graphnet.utilities.config.model_config.html @@ -390,6 +390,8 @@
  • as_dict()
  • +
  • model_computed_fields +
  • model_config
  • model_fields @@ -432,6 +434,13 @@ as_dict() +
  • +
  • + + + model_computed_fields + +
  • @@ -572,6 +581,8 @@
  • as_dict()
  • +
  • model_computed_fields +
  • model_config
  • model_fields @@ -652,6 +663,11 @@
  • +
    +model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}
    +

    A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

    +
    +
    model_config: ClassVar[ConfigDict] = {}

    Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

    diff --git a/api/graphnet.utilities.config.training_config.html b/api/graphnet.utilities.config.training_config.html index 319a33188..fb1647bf3 100644 --- a/api/graphnet.utilities.config.training_config.html +++ b/api/graphnet.utilities.config.training_config.html @@ -406,6 +406,8 @@
  • dataloader
  • +
  • model_computed_fields +
  • model_config
  • model_fields @@ -449,6 +451,13 @@ dataloader +
  • +
  • + + + model_computed_fields + +
  • @@ -556,6 +565,8 @@
  • dataloader
  • +
  • model_computed_fields +
  • model_config
  • model_fields @@ -584,8 +595,7 @@

    Create a new model by parsing and validating input data from keyword arguments.

    Raises [ValidationError][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.

    -

__init__ uses __pydantic_self__ instead of the more common self for the first arg to allow self as a field name.

    +

    self is explicitly positional-only to allow self as a field name.

    Parameters:
      @@ -613,6 +623,11 @@ dataloader: Dict[str, Any]
    +
    +model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}
    +

    A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

    +
    +
    model_config: ClassVar[ConfigDict] = {}

    Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].
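A minimal sketch of the keyword-argument validation described above, using the documented TrainingConfig; the field values are hypothetical, and pydantic raises a ValidationError if they do not validate:

    from graphnet.utilities.config.training_config import TrainingConfig

    config = TrainingConfig(
        target="energy",
        early_stopping_patience=5,
        fit={"max_epochs": 10},
        dataloader={"batch_size": 128, "num_workers": 8},
    )
    print(config.as_dict())  # documented serialization helper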

    diff --git a/genindex.html b/genindex.html index 7cb4459cb..e743f2336 100644 --- a/genindex.html +++ b/genindex.html @@ -339,7 +339,6 @@

    Index

    | N | O | P - | Q | R | S | T @@ -347,20 +346,19 @@

    Index

    | V | W | X - | Z
  • A

    + -
    @@ -388,13 +378,9 @@

    B

  • backward() (graphnet.training.loss_functions.LogCMK static method)
  • BaseConfig (class in graphnet.utilities.config.base_config) -
  • -
  • BinaryClassificationTask (class in graphnet.models.task.classification)
  • + - @@ -613,29 +467,13 @@

    E

    + -

    G

    + -
    +
    • graphnet.data.sqlite.sqlite_utilities @@ -1078,50 +816,6 @@

      G

    • -
    • - graphnet.deployment.i3modules.graphnet_module - -
    • -
    • - graphnet.models - -
    • -
    • - graphnet.models.coarsening - -
    • -
    • - graphnet.models.components - -
    • -
      -
    • - graphnet.models.components.layers - -
    • -
    • - graphnet.models.components.pool - -
    • @@ -1150,111 +844,6 @@

      G

    • -
    • - graphnet.models.gnn - -
    • -
    • - graphnet.models.gnn.convnet - -
    • -
    • - graphnet.models.gnn.dynedge - -
    • -
    • - graphnet.models.gnn.dynedge_jinst - -
    • -
    • - graphnet.models.gnn.dynedge_kaggle_tito - -
    • -
    • - graphnet.models.gnn.gnn - -
    • -
    • - graphnet.models.graphs - -
    • -
    • - graphnet.models.graphs.edges - -
    • -
    • - graphnet.models.graphs.edges.edges - -
    • -
    • - graphnet.models.graphs.edges.minkowski - -
    • -
    • - graphnet.models.graphs.graph_definition - -
    • -
    • - graphnet.models.graphs.graphs - -
    • -
    • - graphnet.models.graphs.nodes - -
    • -
    • - graphnet.models.graphs.nodes.nodes - -
    • -
    • - graphnet.models.graphs.utils - -
    • @@ -1262,48 +851,6 @@

      G

    • -
    • - graphnet.models.standard_model - -
    • -
    • - graphnet.models.task - -
    • -
    • - graphnet.models.task.classification - -
    • -
    • - graphnet.models.task.reconstruction - -
    • -
    • - graphnet.models.task.task - -
    • -
    • - graphnet.models.utils - -
    • @@ -1353,13 +900,6 @@

      G

    • -
    • - graphnet.training.utils - -
    • @@ -1475,14 +1015,6 @@

      G

  • GraphnetEarlyStopping (class in graphnet.training.callbacks) -
  • -
  • GraphNeTI3Module (class in graphnet.deployment.i3modules.graphnet_module) -
  • -
  • group_by() (in module graphnet.models.components.pool) -
  • -
  • group_pulses_to_dom() (in module graphnet.models.components.pool) -
  • -
  • group_pulses_to_pmt() (in module graphnet.models.components.pool)
  • @@ -1529,16 +1061,12 @@

    I

  • I3GalacticPlaneHybridRecoExtractor (class in graphnet.data.extractors.i3hybridrecoextractor)
  • I3GenericExtractor (class in graphnet.data.extractors.i3genericextractor) -
  • -
  • I3InferenceModule (class in graphnet.deployment.i3modules.graphnet_module)
  • I3NTMuonLabelExtractor (class in graphnet.data.extractors.i3ntmuonlabelsextractor)
  • I3ParticleExtractor (class in graphnet.data.extractors.i3particleextractor)
  • I3PISAExtractor (class in graphnet.data.extractors.i3pisaextractor) -
  • -
  • I3PulseCleanerModule (class in graphnet.deployment.i3modules.graphnet_module)
  • I3PulseNoiseTruthFlagIceCubeUpgrade (class in graphnet.data.extractors.i3featureextractor)
  • @@ -1560,28 +1088,16 @@

    I

  • (graphnet.data.constants.TRUTH attribute)
  • -
  • IceCubeDeepCore (class in graphnet.models.detector.icecube) -
  • -
  • key (graphnet.training.labels.Label property) -
  • @@ -1647,10 +1155,6 @@

    L

    - +
    -
  • load_module() (in module graphnet.data.dataset.dataset) -
  • load_state_dict() (graphnet.models.model.Model method)
  • log_cmk() (graphnet.training.loss_functions.VonMisesFisherLoss class method)
  • -
    -

    Q

    - - -
    -

    R

    - + + -
  • sensor_index_name (graphnet.models.detector.detector.Detector property)
  • +
    -
  • set_number_of_inputs() (graphnet.models.graphs.nodes.nodes.NodeDefinition method) -
  • -
    @@ -2249,44 +1557,26 @@

    T

    - +

    U

    @@ -2303,14 +1593,10 @@

    U

    V

    -

    Z

    - - - -
    -
diff --git a/objects.inv b/objects.inv index ea999733bc5b88030f053a5b7cbc7331c97b13f1..c0dee6598806a962b314082af150dd07b8d7edc6 100644 GIT binary patch (delta 4729 / delta 6615: compressed binary payload, not human-readable)
diff --git a/py-modindex.html b/py-modindex.html index 8212532e4..5e5896eb1 100644 --- a/py-modindex.html +++ b/py-modindex.html @@ -360,41 +360,6 @@

    Python Module Index

        graphnet.data.dataconverter - - -     - graphnet.data.dataloader - - - -     - graphnet.data.dataset - - - -     - graphnet.data.dataset.dataset - - - -     - graphnet.data.dataset.parquet - - - -     - graphnet.data.dataset.parquet.parquet_dataset - - - -     - graphnet.data.dataset.sqlite - - - -     - graphnet.data.dataset.sqlite.sqlite_dataset -     @@ -495,11 +460,6 @@

    Python Module Index

        graphnet.data.parquet.parquet_dataconverter - - -     - graphnet.data.pipeline -     @@ -540,36 +500,6 @@

    Python Module Index

        graphnet.deployment - - -     - graphnet.deployment.i3modules.graphnet_module - - - -     - graphnet.models - - - -     - graphnet.models.coarsening - - - -     - graphnet.models.components - - - -     - graphnet.models.components.layers - - - -     - graphnet.models.components.pool -     @@ -590,116 +520,11 @@

    Python Module Index

        graphnet.models.detector.prometheus - - -     - graphnet.models.gnn - - - -     - graphnet.models.gnn.convnet - - - -     - graphnet.models.gnn.dynedge - - - -     - graphnet.models.gnn.dynedge_jinst - - - -     - graphnet.models.gnn.dynedge_kaggle_tito - - - -     - graphnet.models.gnn.gnn - - - -     - graphnet.models.graphs - - - -     - graphnet.models.graphs.edges - - - -     - graphnet.models.graphs.edges.edges - - - -     - graphnet.models.graphs.edges.minkowski - - - -     - graphnet.models.graphs.graph_definition - - - -     - graphnet.models.graphs.graphs - - - -     - graphnet.models.graphs.nodes - - - -     - graphnet.models.graphs.nodes.nodes - - - -     - graphnet.models.graphs.utils -     graphnet.models.model - - -     - graphnet.models.standard_model - - - -     - graphnet.models.task - - - -     - graphnet.models.task.classification - - - -     - graphnet.models.task.reconstruction - - - -     - graphnet.models.task.task - - - -     - graphnet.models.utils -     @@ -735,11 +560,6 @@

    Python Module Index

        graphnet.training.loss_functions - - -     - graphnet.training.utils -     diff --git a/searchindex.js b/searchindex.js index 228159cba..d349ad633 100644 --- a/searchindex.js +++ b/searchindex.js @@ -1 +1 @@ -Search.setIndex({"docnames": ["about", "api/graphnet", "api/graphnet.constants", "api/graphnet.data", "api/graphnet.data.constants", "api/graphnet.data.dataconverter", "api/graphnet.data.dataloader", "api/graphnet.data.dataset", "api/graphnet.data.dataset.dataset", "api/graphnet.data.dataset.parquet", "api/graphnet.data.dataset.parquet.parquet_dataset", "api/graphnet.data.dataset.sqlite", "api/graphnet.data.dataset.sqlite.sqlite_dataset", "api/graphnet.data.extractors", "api/graphnet.data.extractors.i3extractor", "api/graphnet.data.extractors.i3featureextractor", "api/graphnet.data.extractors.i3genericextractor", "api/graphnet.data.extractors.i3hybridrecoextractor", "api/graphnet.data.extractors.i3ntmuonlabelsextractor", "api/graphnet.data.extractors.i3particleextractor", "api/graphnet.data.extractors.i3pisaextractor", "api/graphnet.data.extractors.i3quesoextractor", "api/graphnet.data.extractors.i3retroextractor", "api/graphnet.data.extractors.i3splinempeextractor", "api/graphnet.data.extractors.i3truthextractor", "api/graphnet.data.extractors.i3tumextractor", "api/graphnet.data.extractors.utilities", "api/graphnet.data.extractors.utilities.collections", "api/graphnet.data.extractors.utilities.frames", "api/graphnet.data.extractors.utilities.types", "api/graphnet.data.filters", "api/graphnet.data.parquet", "api/graphnet.data.parquet.parquet_dataconverter", "api/graphnet.data.pipeline", "api/graphnet.data.sqlite", "api/graphnet.data.sqlite.sqlite_dataconverter", "api/graphnet.data.sqlite.sqlite_utilities", "api/graphnet.data.utilities", "api/graphnet.data.utilities.parquet_to_sqlite", "api/graphnet.data.utilities.random", "api/graphnet.data.utilities.string_selection_resolver", "api/graphnet.deployment", "api/graphnet.deployment.i3modules", "api/graphnet.deployment.i3modules.deployer", "api/graphnet.deployment.i3modules.graphnet_module", "api/graphnet.models", "api/graphnet.models.coarsening", "api/graphnet.models.components", "api/graphnet.models.components.layers", "api/graphnet.models.components.pool", "api/graphnet.models.detector", "api/graphnet.models.detector.detector", "api/graphnet.models.detector.icecube", "api/graphnet.models.detector.prometheus", "api/graphnet.models.gnn", "api/graphnet.models.gnn.convnet", "api/graphnet.models.gnn.dynedge", "api/graphnet.models.gnn.dynedge_jinst", "api/graphnet.models.gnn.dynedge_kaggle_tito", "api/graphnet.models.gnn.gnn", "api/graphnet.models.graphs", "api/graphnet.models.graphs.edges", "api/graphnet.models.graphs.edges.edges", "api/graphnet.models.graphs.edges.minkowski", "api/graphnet.models.graphs.graph_definition", "api/graphnet.models.graphs.graphs", "api/graphnet.models.graphs.nodes", "api/graphnet.models.graphs.nodes.nodes", "api/graphnet.models.graphs.utils", "api/graphnet.models.model", "api/graphnet.models.standard_model", "api/graphnet.models.task", "api/graphnet.models.task.classification", "api/graphnet.models.task.reconstruction", "api/graphnet.models.task.task", "api/graphnet.models.utils", "api/graphnet.pisa", "api/graphnet.pisa.fitting", "api/graphnet.pisa.plotting", "api/graphnet.training", "api/graphnet.training.callbacks", "api/graphnet.training.labels", "api/graphnet.training.loss_functions", "api/graphnet.training.utils", "api/graphnet.training.weight_fitting", "api/graphnet.utilities", 
"api/graphnet.utilities.argparse", "api/graphnet.utilities.config", "api/graphnet.utilities.config.base_config", "api/graphnet.utilities.config.configurable", "api/graphnet.utilities.config.dataset_config", "api/graphnet.utilities.config.model_config", "api/graphnet.utilities.config.parsing", "api/graphnet.utilities.config.training_config", "api/graphnet.utilities.decorators", "api/graphnet.utilities.deprecation_tools", "api/graphnet.utilities.filesys", "api/graphnet.utilities.imports", "api/graphnet.utilities.logging", "api/graphnet.utilities.maths", "api/modules", "contribute", "index", "install"], "filenames": ["about.md", "api/graphnet.rst", "api/graphnet.constants.rst", "api/graphnet.data.rst", "api/graphnet.data.constants.rst", "api/graphnet.data.dataconverter.rst", "api/graphnet.data.dataloader.rst", "api/graphnet.data.dataset.rst", "api/graphnet.data.dataset.dataset.rst", "api/graphnet.data.dataset.parquet.rst", "api/graphnet.data.dataset.parquet.parquet_dataset.rst", "api/graphnet.data.dataset.sqlite.rst", "api/graphnet.data.dataset.sqlite.sqlite_dataset.rst", "api/graphnet.data.extractors.rst", "api/graphnet.data.extractors.i3extractor.rst", "api/graphnet.data.extractors.i3featureextractor.rst", "api/graphnet.data.extractors.i3genericextractor.rst", "api/graphnet.data.extractors.i3hybridrecoextractor.rst", "api/graphnet.data.extractors.i3ntmuonlabelsextractor.rst", "api/graphnet.data.extractors.i3particleextractor.rst", "api/graphnet.data.extractors.i3pisaextractor.rst", "api/graphnet.data.extractors.i3quesoextractor.rst", "api/graphnet.data.extractors.i3retroextractor.rst", "api/graphnet.data.extractors.i3splinempeextractor.rst", "api/graphnet.data.extractors.i3truthextractor.rst", "api/graphnet.data.extractors.i3tumextractor.rst", "api/graphnet.data.extractors.utilities.rst", "api/graphnet.data.extractors.utilities.collections.rst", "api/graphnet.data.extractors.utilities.frames.rst", "api/graphnet.data.extractors.utilities.types.rst", "api/graphnet.data.filters.rst", "api/graphnet.data.parquet.rst", "api/graphnet.data.parquet.parquet_dataconverter.rst", "api/graphnet.data.pipeline.rst", "api/graphnet.data.sqlite.rst", "api/graphnet.data.sqlite.sqlite_dataconverter.rst", "api/graphnet.data.sqlite.sqlite_utilities.rst", "api/graphnet.data.utilities.rst", "api/graphnet.data.utilities.parquet_to_sqlite.rst", "api/graphnet.data.utilities.random.rst", "api/graphnet.data.utilities.string_selection_resolver.rst", "api/graphnet.deployment.rst", "api/graphnet.deployment.i3modules.rst", "api/graphnet.deployment.i3modules.deployer.rst", "api/graphnet.deployment.i3modules.graphnet_module.rst", "api/graphnet.models.rst", "api/graphnet.models.coarsening.rst", "api/graphnet.models.components.rst", "api/graphnet.models.components.layers.rst", "api/graphnet.models.components.pool.rst", "api/graphnet.models.detector.rst", "api/graphnet.models.detector.detector.rst", "api/graphnet.models.detector.icecube.rst", "api/graphnet.models.detector.prometheus.rst", "api/graphnet.models.gnn.rst", "api/graphnet.models.gnn.convnet.rst", "api/graphnet.models.gnn.dynedge.rst", "api/graphnet.models.gnn.dynedge_jinst.rst", "api/graphnet.models.gnn.dynedge_kaggle_tito.rst", "api/graphnet.models.gnn.gnn.rst", "api/graphnet.models.graphs.rst", "api/graphnet.models.graphs.edges.rst", "api/graphnet.models.graphs.edges.edges.rst", "api/graphnet.models.graphs.edges.minkowski.rst", "api/graphnet.models.graphs.graph_definition.rst", "api/graphnet.models.graphs.graphs.rst", "api/graphnet.models.graphs.nodes.rst", 
"api/graphnet.models.graphs.nodes.nodes.rst", "api/graphnet.models.graphs.utils.rst", "api/graphnet.models.model.rst", "api/graphnet.models.standard_model.rst", "api/graphnet.models.task.rst", "api/graphnet.models.task.classification.rst", "api/graphnet.models.task.reconstruction.rst", "api/graphnet.models.task.task.rst", "api/graphnet.models.utils.rst", "api/graphnet.pisa.rst", "api/graphnet.pisa.fitting.rst", "api/graphnet.pisa.plotting.rst", "api/graphnet.training.rst", "api/graphnet.training.callbacks.rst", "api/graphnet.training.labels.rst", "api/graphnet.training.loss_functions.rst", "api/graphnet.training.utils.rst", "api/graphnet.training.weight_fitting.rst", "api/graphnet.utilities.rst", "api/graphnet.utilities.argparse.rst", "api/graphnet.utilities.config.rst", "api/graphnet.utilities.config.base_config.rst", "api/graphnet.utilities.config.configurable.rst", "api/graphnet.utilities.config.dataset_config.rst", "api/graphnet.utilities.config.model_config.rst", "api/graphnet.utilities.config.parsing.rst", "api/graphnet.utilities.config.training_config.rst", "api/graphnet.utilities.decorators.rst", "api/graphnet.utilities.deprecation_tools.rst", "api/graphnet.utilities.filesys.rst", "api/graphnet.utilities.imports.rst", "api/graphnet.utilities.logging.rst", "api/graphnet.utilities.maths.rst", "api/modules.rst", "contribute.md", "index.rst", "install.md"], "titles": ["About", "API", "constants", "data", "constants", "dataconverter", "dataloader", "dataset", "dataset", "parquet", "parquet_dataset", "sqlite", "sqlite_dataset", "extractors", "i3extractor", "i3featureextractor", "i3genericextractor", "i3hybridrecoextractor", "i3ntmuonlabelsextractor", "i3particleextractor", "i3pisaextractor", "i3quesoextractor", "i3retroextractor", "i3splinempeextractor", "i3truthextractor", "i3tumextractor", "utilities", "collections", "frames", "types", "filters", "parquet", "parquet_dataconverter", "pipeline", "sqlite", "sqlite_dataconverter", "sqlite_utilities", "utilities", "parquet_to_sqlite", "random", "string_selection_resolver", "deployment", "i3modules", "deployer", "graphnet_module", "models", "coarsening", "components", "layers", "pool", "detector", "detector", "icecube", "prometheus", "gnn", "convnet", "dynedge", "dynedge_jinst", "dynedge_kaggle_tito", "gnn", "graphs", "edges", "edges", "minkowski", "graph_definition", "graphs", "nodes", "nodes", "utils", "model", "standard_model", "task", "classification", "reconstruction", "task", "utils", "pisa", "fitting", "plotting", "training", "callbacks", "labels", "loss_functions", "utils", "weight_fitting", "utilities", "argparse", "config", "base_config", "configurable", "dataset_config", "model_config", "parsing", "training_config", "decorators", "deprecation_tools", "filesys", "imports", "logging", "maths", "src", "Contribute", "About", "Install"], "terms": {"graphnet": [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32, 33, 35, 36, 37, 38, 39, 40, 41, 44, 45, 46, 48, 49, 51, 52, 53, 55, 56, 57, 58, 59, 62, 63, 64, 65, 67, 68, 69, 70, 72, 73, 74, 75, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 95, 96, 97, 98, 99, 101, 102, 103], "i": [0, 1, 8, 10, 12, 14, 16, 27, 28, 29, 30, 35, 36, 39, 40, 44, 46, 49, 55, 56, 62, 64, 67, 68, 73, 74, 75, 78, 80, 81, 82, 83, 84, 86, 91, 92, 95, 96, 97, 98, 101, 102, 103], "an": [0, 5, 29, 32, 33, 35, 40, 44, 64, 82, 96, 98, 101, 102, 103], "open": [0, 101, 102], "sourc": [0, 4, 5, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 
[searchindex.js payload: the regenerated, minified Sphinx search index (machine-generated). It contains a "terms" table mapping stemmed search terms (e.g. "python", "reconstruct", "dynedg") to the indices of the documentation pages that contain them, and an "objects" table mapping dotted API paths under the graphnet package (graphnet.data, graphnet.deployment, graphnet.models, graphnet.pisa, graphnet.training, graphnet.utilities, and their submodules) to one entry of the form [doc_index, objtype_index, priority, anchor, name] per documented module, class, method, property, exception, and function.]
3, 1, "", "load_state_dict"], [69, 3, 1, "", "save"], [69, 3, 1, "", "save_state_dict"]], "graphnet.models.standard_model": [[70, 1, 1, "", "StandardModel"]], "graphnet.models.standard_model.StandardModel": [[70, 3, 1, "", "compute_loss"], [70, 3, 1, "", "configure_optimizers"], [70, 3, 1, "", "fit"], [70, 3, 1, "", "forward"], [70, 3, 1, "", "inference"], [70, 3, 1, "", "predict"], [70, 3, 1, "", "predict_as_dataframe"], [70, 4, 1, "", "prediction_labels"], [70, 3, 1, "", "shared_step"], [70, 4, 1, "", "target_labels"], [70, 3, 1, "", "train"], [70, 3, 1, "", "training_step"], [70, 3, 1, "", "validation_step"]], "graphnet.models.task": [[72, 0, 0, "-", "classification"], [73, 0, 0, "-", "reconstruction"], [74, 0, 0, "-", "task"]], "graphnet.models.task.classification": [[72, 1, 1, "", "BinaryClassificationTask"], [72, 1, 1, "", "BinaryClassificationTaskLogits"], [72, 1, 1, "", "MulticlassClassificationTask"]], "graphnet.models.task.classification.BinaryClassificationTask": [[72, 2, 1, "", "default_prediction_labels"], [72, 2, 1, "", "default_target_labels"], [72, 2, 1, "", "nb_inputs"]], "graphnet.models.task.classification.BinaryClassificationTaskLogits": [[72, 2, 1, "", "default_prediction_labels"], [72, 2, 1, "", "default_target_labels"], [72, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction": [[73, 1, 1, "", "AzimuthReconstruction"], [73, 1, 1, "", "AzimuthReconstructionWithKappa"], [73, 1, 1, "", "DirectionReconstructionWithKappa"], [73, 1, 1, "", "EnergyReconstruction"], [73, 1, 1, "", "EnergyReconstructionWithPower"], [73, 1, 1, "", "EnergyReconstructionWithUncertainty"], [73, 1, 1, "", "EnergyTCReconstruction"], [73, 1, 1, "", "InelasticityReconstruction"], [73, 1, 1, "", "PositionReconstruction"], [73, 1, 1, "", "TimeReconstruction"], [73, 1, 1, "", "VertexReconstruction"], [73, 1, 1, "", "ZenithReconstruction"], [73, 1, 1, "", "ZenithReconstructionWithKappa"]], "graphnet.models.task.reconstruction.AzimuthReconstruction": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.EnergyReconstruction": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.EnergyReconstructionWithPower": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.EnergyTCReconstruction": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.InelasticityReconstruction": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.PositionReconstruction": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", 
"nb_inputs"]], "graphnet.models.task.reconstruction.TimeReconstruction": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.VertexReconstruction": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.ZenithReconstruction": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa": [[73, 2, 1, "", "default_prediction_labels"], [73, 2, 1, "", "default_target_labels"], [73, 2, 1, "", "nb_inputs"]], "graphnet.models.task.task": [[74, 1, 1, "", "IdentityTask"], [74, 1, 1, "", "LearnedTask"], [74, 1, 1, "", "StandardFlowTask"], [74, 1, 1, "", "StandardLearnedTask"], [74, 1, 1, "", "Task"]], "graphnet.models.task.task.IdentityTask": [[74, 4, 1, "", "default_prediction_labels"], [74, 4, 1, "", "default_target_labels"], [74, 4, 1, "", "nb_inputs"]], "graphnet.models.task.task.LearnedTask": [[74, 3, 1, "", "compute_loss"], [74, 3, 1, "", "forward"], [74, 4, 1, "", "nb_inputs"]], "graphnet.models.task.task.StandardFlowTask": [[74, 3, 1, "", "compute_loss"], [74, 3, 1, "", "forward"], [74, 3, 1, "", "nb_inputs"]], "graphnet.models.task.task.StandardLearnedTask": [[74, 3, 1, "", "compute_loss"], [74, 4, 1, "", "nb_inputs"]], "graphnet.models.task.task.Task": [[74, 4, 1, "", "default_prediction_labels"], [74, 4, 1, "", "default_target_labels"], [74, 3, 1, "", "inference"], [74, 4, 1, "", "nb_inputs"], [74, 3, 1, "", "train_eval"]], "graphnet.models.utils": [[75, 5, 1, "", "calculate_distance_matrix"], [75, 5, 1, "", "calculate_xyzt_homophily"], [75, 5, 1, "", "knn_graph_batch"]], "graphnet.pisa": [[77, 0, 0, "-", "fitting"], [78, 0, 0, "-", "plotting"]], "graphnet.pisa.fitting": [[77, 1, 1, "", "ContourFitter"], [77, 1, 1, "", "WeightFitter"], [77, 5, 1, "", "config_updater"]], "graphnet.pisa.fitting.ContourFitter": [[77, 3, 1, "", "fit_1d_contour"], [77, 3, 1, "", "fit_2d_contour"]], "graphnet.pisa.fitting.WeightFitter": [[77, 3, 1, "", "fit_weights"]], "graphnet.pisa.plotting": [[78, 5, 1, "", "plot_1D_contour"], [78, 5, 1, "", "plot_2D_contour"], [78, 5, 1, "", "read_entry"]], "graphnet.training": [[80, 0, 0, "-", "callbacks"], [81, 0, 0, "-", "labels"], [82, 0, 0, "-", "loss_functions"], [83, 0, 0, "-", "utils"], [84, 0, 0, "-", "weight_fitting"]], "graphnet.training.callbacks": [[80, 1, 1, "", "GraphnetEarlyStopping"], [80, 1, 1, "", "PiecewiseLinearLR"], [80, 1, 1, "", "ProgressBar"]], "graphnet.training.callbacks.GraphnetEarlyStopping": [[80, 3, 1, "", "on_fit_end"], [80, 3, 1, "", "on_train_epoch_end"], [80, 3, 1, "", "on_validation_end"], [80, 3, 1, "", "setup"]], "graphnet.training.callbacks.PiecewiseLinearLR": [[80, 3, 1, "", "get_lr"]], "graphnet.training.callbacks.ProgressBar": [[80, 3, 1, "", "get_metrics"], [80, 3, 1, "", "init_predict_tqdm"], [80, 3, 1, "", "init_test_tqdm"], [80, 3, 1, "", "init_train_tqdm"], [80, 3, 1, "", "init_validation_tqdm"], [80, 3, 1, "", "on_train_epoch_end"], [80, 3, 1, "", "on_train_epoch_start"]], "graphnet.training.labels": [[81, 1, 1, "", "Direction"], [81, 1, 1, "", "Label"]], "graphnet.training.labels.Label": [[81, 4, 1, "", "key"]], "graphnet.training.loss_functions": [[82, 1, 1, "", "BinaryCrossEntropyLoss"], [82, 1, 1, "", "CrossEntropyLoss"], [82, 1, 1, "", "EuclideanDistanceLoss"], [82, 1, 1, "", "LogCMK"], [82, 1, 1, "", 
"LogCoshLoss"], [82, 1, 1, "", "LossFunction"], [82, 1, 1, "", "MSELoss"], [82, 1, 1, "", "RMSELoss"], [82, 1, 1, "", "VonMisesFisher2DLoss"], [82, 1, 1, "", "VonMisesFisher3DLoss"], [82, 1, 1, "", "VonMisesFisherLoss"]], "graphnet.training.loss_functions.LogCMK": [[82, 3, 1, "", "backward"], [82, 3, 1, "", "forward"]], "graphnet.training.loss_functions.LossFunction": [[82, 3, 1, "", "forward"]], "graphnet.training.loss_functions.VonMisesFisherLoss": [[82, 3, 1, "", "log_cmk"], [82, 3, 1, "", "log_cmk_approx"], [82, 3, 1, "", "log_cmk_exact"]], "graphnet.training.utils": [[83, 5, 1, "", "collate_fn"], [83, 1, 1, "", "collator_sequence_buckleting"], [83, 5, 1, "", "get_predictions"], [83, 5, 1, "", "make_dataloader"], [83, 5, 1, "", "make_train_validation_dataloader"], [83, 5, 1, "", "save_results"]], "graphnet.training.weight_fitting": [[84, 1, 1, "", "BjoernLow"], [84, 1, 1, "", "Uniform"], [84, 1, 1, "", "WeightFitter"]], "graphnet.training.weight_fitting.WeightFitter": [[84, 3, 1, "", "fit"]], "graphnet.utilities": [[86, 0, 0, "-", "argparse"], [87, 0, 0, "-", "config"], [94, 0, 0, "-", "decorators"], [95, 0, 0, "-", "deprecation_tools"], [96, 0, 0, "-", "filesys"], [97, 0, 0, "-", "imports"], [98, 0, 0, "-", "logging"], [99, 0, 0, "-", "maths"]], "graphnet.utilities.argparse": [[86, 1, 1, "", "ArgumentParser"], [86, 1, 1, "", "Options"]], "graphnet.utilities.argparse.ArgumentParser": [[86, 2, 1, "", "standard_arguments"], [86, 3, 1, "", "with_standard_arguments"]], "graphnet.utilities.argparse.Options": [[86, 3, 1, "", "contains"], [86, 3, 1, "", "pop_default"]], "graphnet.utilities.config": [[88, 0, 0, "-", "base_config"], [89, 0, 0, "-", "configurable"], [90, 0, 0, "-", "dataset_config"], [91, 0, 0, "-", "model_config"], [92, 0, 0, "-", "parsing"], [93, 0, 0, "-", "training_config"]], "graphnet.utilities.config.base_config": [[88, 1, 1, "", "BaseConfig"], [88, 5, 1, "", "get_all_argument_values"]], "graphnet.utilities.config.base_config.BaseConfig": [[88, 3, 1, "", "as_dict"], [88, 3, 1, "", "dump"], [88, 3, 1, "", "load"], [88, 2, 1, "", "model_config"], [88, 2, 1, "", "model_fields"]], "graphnet.utilities.config.configurable": [[89, 1, 1, "", "Configurable"]], "graphnet.utilities.config.configurable.Configurable": [[89, 4, 1, "", "config"], [89, 3, 1, "", "from_config"], [89, 3, 1, "", "save_config"]], "graphnet.utilities.config.dataset_config": [[90, 1, 1, "", "DatasetConfig"], [90, 1, 1, "", "DatasetConfigSaverABCMeta"], [90, 1, 1, "", "DatasetConfigSaverMeta"], [90, 5, 1, "", "save_dataset_config"]], "graphnet.utilities.config.dataset_config.DatasetConfig": [[90, 3, 1, "", "as_dict"], [90, 2, 1, "", "features"], [90, 2, 1, "", "graph_definition"], [90, 2, 1, "", "index_column"], [90, 2, 1, "", "loss_weight_column"], [90, 2, 1, "", "loss_weight_default_value"], [90, 2, 1, "", "loss_weight_table"], [90, 2, 1, "", "model_config"], [90, 2, 1, "", "model_fields"], [90, 2, 1, "", "node_truth"], [90, 2, 1, "", "node_truth_table"], [90, 2, 1, "", "path"], [90, 2, 1, "", "pulsemaps"], [90, 2, 1, "", "seed"], [90, 2, 1, "", "selection"], [90, 2, 1, "", "string_selection"], [90, 2, 1, "", "truth"], [90, 2, 1, "", "truth_table"]], "graphnet.utilities.config.model_config": [[91, 1, 1, "", "ModelConfig"], [91, 1, 1, "", "ModelConfigSaverABC"], [91, 1, 1, "", "ModelConfigSaverMeta"], [91, 5, 1, "", "save_model_config"]], "graphnet.utilities.config.model_config.ModelConfig": [[91, 2, 1, "", "arguments"], [91, 3, 1, "", "as_dict"], [91, 2, 1, "", "class_name"], [91, 2, 1, "", "model_config"], 
[91, 2, 1, "", "model_fields"]], "graphnet.utilities.config.parsing": [[92, 5, 1, "", "get_all_grapnet_classes"], [92, 5, 1, "", "get_graphnet_classes"], [92, 5, 1, "", "is_graphnet_class"], [92, 5, 1, "", "is_graphnet_module"], [92, 5, 1, "", "list_all_submodules"], [92, 5, 1, "", "traverse_and_apply"]], "graphnet.utilities.config.training_config": [[93, 1, 1, "", "TrainingConfig"]], "graphnet.utilities.config.training_config.TrainingConfig": [[93, 2, 1, "", "dataloader"], [93, 2, 1, "", "early_stopping_patience"], [93, 2, 1, "", "fit"], [93, 2, 1, "", "model_config"], [93, 2, 1, "", "model_fields"], [93, 2, 1, "", "target"]], "graphnet.utilities.deprecation_tools": [[95, 5, 1, "", "rename_state_dict_entries"]], "graphnet.utilities.filesys": [[96, 5, 1, "", "find_i3_files"], [96, 5, 1, "", "has_extension"], [96, 5, 1, "", "is_gcd_file"], [96, 5, 1, "", "is_i3_file"]], "graphnet.utilities.imports": [[97, 5, 1, "", "has_icecube_package"], [97, 5, 1, "", "has_pisa_package"], [97, 5, 1, "", "has_torch_package"], [97, 5, 1, "", "requires_icecube"]], "graphnet.utilities.logging": [[98, 1, 1, "", "Logger"], [98, 1, 1, "", "RepeatFilter"]], "graphnet.utilities.logging.Logger": [[98, 3, 1, "", "critical"], [98, 3, 1, "", "debug"], [98, 3, 1, "", "error"], [98, 4, 1, "", "file_handlers"], [98, 4, 1, "", "handlers"], [98, 3, 1, "", "info"], [98, 3, 1, "", "setLevel"], [98, 4, 1, "", "stream_handlers"], [98, 3, 1, "", "warning"], [98, 3, 1, "", "warning_once"]], "graphnet.utilities.logging.RepeatFilter": [[98, 3, 1, "", "filter"], [98, 2, 1, "", "nb_repeats_allowed"]], "graphnet.utilities.maths": [[99, 5, 1, "", "eps_like"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:attribute", "3": "py:method", "4": "py:property", "5": "py:function", "6": "py:exception"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "attribute", "Python attribute"], "3": ["py", "method", "Python method"], "4": ["py", "property", "Python property"], "5": ["py", "function", "Python function"], "6": ["py", "exception", "Python exception"]}, "titleterms": {"about": [0, 102], "impact": [0, 102], "usag": [0, 102], "acknowledg": [0, 102], "api": 1, "constant": [2, 4], "data": 3, "dataconvert": 5, "dataload": 6, "dataset": [7, 8], "parquet": [9, 31], "parquet_dataset": 10, "sqlite": [11, 34], "sqlite_dataset": 12, "extractor": 13, "i3extractor": 14, "i3featureextractor": 15, "i3genericextractor": 16, "i3hybridrecoextractor": 17, "i3ntmuonlabelsextractor": 18, "i3particleextractor": 19, "i3pisaextractor": 20, "i3quesoextractor": 21, "i3retroextractor": 22, "i3splinempeextractor": 23, "i3truthextractor": 24, "i3tumextractor": 25, "util": [26, 37, 68, 75, 83, 85], "collect": 27, "frame": 28, "type": 29, "filter": 30, "parquet_dataconvert": 32, "pipelin": 33, "sqlite_dataconvert": 35, "sqlite_util": 36, "parquet_to_sqlit": 38, "random": 39, "string_selection_resolv": 40, "deploy": [41, 43], "i3modul": 42, "graphnet_modul": 44, "model": [45, 69], "coarsen": 46, "compon": 47, "layer": 48, "pool": 49, "detector": [50, 51], "icecub": 52, "prometheu": 53, "gnn": [54, 59], "convnet": 55, "dynedg": 56, "dynedge_jinst": 57, "dynedge_kaggle_tito": 58, "graph": [60, 65], "edg": [61, 62], "minkowski": 63, "graph_definit": 64, "node": [66, 67], "standard_model": 70, "task": [71, 74], "classif": 72, "reconstruct": 73, "pisa": 76, "fit": 77, "plot": 78, "train": 79, "callback": 80, "label": 81, "loss_funct": 82, "weight_fit": 84, "argpars": 86, "config": 87, "base_config": 88, 
"configur": 89, "dataset_config": 90, "model_config": 91, "pars": 92, "training_config": 93, "decor": 94, "deprecation_tool": 95, "filesi": 96, "import": 97, "log": 98, "math": 99, "src": 100, "contribut": 101, "github": 101, "issu": 101, "pull": 101, "request": 101, "convent": 101, "code": 101, "qualiti": 101, "instal": 103, "icetrai": 103, "stand": 103, "alon": 103, "run": 103, "docker": 103}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 60}, "alltitles": {"About": [[0, "about"], [102, "about"]], "Impact": [[0, "impact"], [102, "impact"]], "Usage": [[0, "usage"], [102, "usage"]], "Acknowledgements": [[0, "acknowledgements"], [102, "acknowledgements"]], "API": [[1, "module-graphnet"]], "constants": [[2, "module-graphnet.constants"], [4, "module-graphnet.data.constants"]], "data": [[3, "module-graphnet.data"]], "dataconverter": [[5, "module-graphnet.data.dataconverter"]], "dataloader": [[6, "module-graphnet.data.dataloader"]], "dataset": [[7, "module-graphnet.data.dataset"], [8, "module-graphnet.data.dataset.dataset"]], "parquet": [[9, "module-graphnet.data.dataset.parquet"], [31, "module-graphnet.data.parquet"]], "parquet_dataset": [[10, "module-graphnet.data.dataset.parquet.parquet_dataset"]], "sqlite": [[11, "module-graphnet.data.dataset.sqlite"], [34, "module-graphnet.data.sqlite"]], "sqlite_dataset": [[12, "module-graphnet.data.dataset.sqlite.sqlite_dataset"]], "extractors": [[13, "module-graphnet.data.extractors"]], "i3extractor": [[14, "module-graphnet.data.extractors.i3extractor"]], "i3featureextractor": [[15, "module-graphnet.data.extractors.i3featureextractor"]], "i3genericextractor": [[16, "module-graphnet.data.extractors.i3genericextractor"]], "i3hybridrecoextractor": [[17, "module-graphnet.data.extractors.i3hybridrecoextractor"]], "i3ntmuonlabelsextractor": [[18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"]], "i3particleextractor": [[19, "module-graphnet.data.extractors.i3particleextractor"]], "i3pisaextractor": [[20, "module-graphnet.data.extractors.i3pisaextractor"]], "i3quesoextractor": [[21, "module-graphnet.data.extractors.i3quesoextractor"]], "i3retroextractor": [[22, "module-graphnet.data.extractors.i3retroextractor"]], "i3splinempeextractor": [[23, "module-graphnet.data.extractors.i3splinempeextractor"]], "i3truthextractor": [[24, "module-graphnet.data.extractors.i3truthextractor"]], "i3tumextractor": [[25, "module-graphnet.data.extractors.i3tumextractor"]], "utilities": [[26, "module-graphnet.data.extractors.utilities"], [37, "module-graphnet.data.utilities"], [85, "module-graphnet.utilities"]], "collections": [[27, "module-graphnet.data.extractors.utilities.collections"]], "frames": [[28, "module-graphnet.data.extractors.utilities.frames"]], "types": [[29, "module-graphnet.data.extractors.utilities.types"]], "filters": [[30, "module-graphnet.data.filters"]], "parquet_dataconverter": [[32, "module-graphnet.data.parquet.parquet_dataconverter"]], "pipeline": [[33, "module-graphnet.data.pipeline"]], "sqlite_dataconverter": [[35, "module-graphnet.data.sqlite.sqlite_dataconverter"]], "sqlite_utilities": [[36, "module-graphnet.data.sqlite.sqlite_utilities"]], "parquet_to_sqlite": [[38, 
"module-graphnet.data.utilities.parquet_to_sqlite"]], "random": [[39, "module-graphnet.data.utilities.random"]], "string_selection_resolver": [[40, "module-graphnet.data.utilities.string_selection_resolver"]], "deployment": [[41, "module-graphnet.deployment"]], "i3modules": [[42, "i3modules"]], "deployer": [[43, "deployer"]], "graphnet_module": [[44, "module-graphnet.deployment.i3modules.graphnet_module"]], "models": [[45, "module-graphnet.models"]], "coarsening": [[46, "module-graphnet.models.coarsening"]], "components": [[47, "module-graphnet.models.components"]], "layers": [[48, "module-graphnet.models.components.layers"]], "pool": [[49, "module-graphnet.models.components.pool"]], "detector": [[50, "module-graphnet.models.detector"], [51, "module-graphnet.models.detector.detector"]], "icecube": [[52, "module-graphnet.models.detector.icecube"]], "prometheus": [[53, "module-graphnet.models.detector.prometheus"]], "gnn": [[54, "module-graphnet.models.gnn"], [59, "module-graphnet.models.gnn.gnn"]], "convnet": [[55, "module-graphnet.models.gnn.convnet"]], "dynedge": [[56, "module-graphnet.models.gnn.dynedge"]], "dynedge_jinst": [[57, "module-graphnet.models.gnn.dynedge_jinst"]], "dynedge_kaggle_tito": [[58, "module-graphnet.models.gnn.dynedge_kaggle_tito"]], "graphs": [[60, "module-graphnet.models.graphs"], [65, "module-graphnet.models.graphs.graphs"]], "edges": [[61, "module-graphnet.models.graphs.edges"], [62, "module-graphnet.models.graphs.edges.edges"]], "minkowski": [[63, "module-graphnet.models.graphs.edges.minkowski"]], "graph_definition": [[64, "module-graphnet.models.graphs.graph_definition"]], "nodes": [[66, "module-graphnet.models.graphs.nodes"], [67, "module-graphnet.models.graphs.nodes.nodes"]], "utils": [[68, "module-graphnet.models.graphs.utils"], [75, "module-graphnet.models.utils"], [83, "module-graphnet.training.utils"]], "model": [[69, "module-graphnet.models.model"]], "standard_model": [[70, "module-graphnet.models.standard_model"]], "task": [[71, "module-graphnet.models.task"], [74, "module-graphnet.models.task.task"]], "classification": [[72, "module-graphnet.models.task.classification"]], "reconstruction": [[73, "module-graphnet.models.task.reconstruction"]], "pisa": [[76, "module-graphnet.pisa"]], "fitting": [[77, "module-graphnet.pisa.fitting"]], "plotting": [[78, "module-graphnet.pisa.plotting"]], "training": [[79, "module-graphnet.training"]], "callbacks": [[80, "module-graphnet.training.callbacks"]], "labels": [[81, "module-graphnet.training.labels"]], "loss_functions": [[82, "module-graphnet.training.loss_functions"]], "weight_fitting": [[84, "module-graphnet.training.weight_fitting"]], "argparse": [[86, "module-graphnet.utilities.argparse"]], "config": [[87, "module-graphnet.utilities.config"]], "base_config": [[88, "module-graphnet.utilities.config.base_config"]], "configurable": [[89, "module-graphnet.utilities.config.configurable"]], "dataset_config": [[90, "module-graphnet.utilities.config.dataset_config"]], "model_config": [[91, "module-graphnet.utilities.config.model_config"]], "parsing": [[92, "module-graphnet.utilities.config.parsing"]], "training_config": [[93, "module-graphnet.utilities.config.training_config"]], "decorators": [[94, "module-graphnet.utilities.decorators"]], "deprecation_tools": [[95, "module-graphnet.utilities.deprecation_tools"]], "filesys": [[96, "module-graphnet.utilities.filesys"]], "imports": [[97, "module-graphnet.utilities.imports"]], "logging": [[98, "module-graphnet.utilities.logging"]], "maths": [[99, 
"module-graphnet.utilities.maths"]], "src": [[100, "src"]], "Contribute": [[101, "contribute"]], "GitHub issues": [[101, "github-issues"]], "Pull requests": [[101, "pull-requests"]], "Conventions": [[101, "conventions"]], "Code quality": [[101, "code-quality"]], "Install": [[103, "install"]], "Installing with IceTray": [[103, "installing-with-icetray"]], "Installing stand-alone": [[103, "installing-stand-alone"]], "Running in Docker": [[103, "running-in-docker"]]}, "indexentries": {"graphnet": [[1, "module-graphnet"]], "module": [[1, "module-graphnet"], [2, "module-graphnet.constants"], [3, "module-graphnet.data"], [4, "module-graphnet.data.constants"], [5, "module-graphnet.data.dataconverter"], [6, "module-graphnet.data.dataloader"], [7, "module-graphnet.data.dataset"], [8, "module-graphnet.data.dataset.dataset"], [9, "module-graphnet.data.dataset.parquet"], [10, "module-graphnet.data.dataset.parquet.parquet_dataset"], [11, "module-graphnet.data.dataset.sqlite"], [12, "module-graphnet.data.dataset.sqlite.sqlite_dataset"], [13, "module-graphnet.data.extractors"], [14, "module-graphnet.data.extractors.i3extractor"], [15, "module-graphnet.data.extractors.i3featureextractor"], [16, "module-graphnet.data.extractors.i3genericextractor"], [17, "module-graphnet.data.extractors.i3hybridrecoextractor"], [18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"], [19, "module-graphnet.data.extractors.i3particleextractor"], [20, "module-graphnet.data.extractors.i3pisaextractor"], [21, "module-graphnet.data.extractors.i3quesoextractor"], [22, "module-graphnet.data.extractors.i3retroextractor"], [23, "module-graphnet.data.extractors.i3splinempeextractor"], [24, "module-graphnet.data.extractors.i3truthextractor"], [25, "module-graphnet.data.extractors.i3tumextractor"], [26, "module-graphnet.data.extractors.utilities"], [27, "module-graphnet.data.extractors.utilities.collections"], [28, "module-graphnet.data.extractors.utilities.frames"], [29, "module-graphnet.data.extractors.utilities.types"], [30, "module-graphnet.data.filters"], [31, "module-graphnet.data.parquet"], [32, "module-graphnet.data.parquet.parquet_dataconverter"], [33, "module-graphnet.data.pipeline"], [34, "module-graphnet.data.sqlite"], [35, "module-graphnet.data.sqlite.sqlite_dataconverter"], [36, "module-graphnet.data.sqlite.sqlite_utilities"], [37, "module-graphnet.data.utilities"], [38, "module-graphnet.data.utilities.parquet_to_sqlite"], [39, "module-graphnet.data.utilities.random"], [40, "module-graphnet.data.utilities.string_selection_resolver"], [41, "module-graphnet.deployment"], [44, "module-graphnet.deployment.i3modules.graphnet_module"], [45, "module-graphnet.models"], [46, "module-graphnet.models.coarsening"], [47, "module-graphnet.models.components"], [48, "module-graphnet.models.components.layers"], [49, "module-graphnet.models.components.pool"], [50, "module-graphnet.models.detector"], [51, "module-graphnet.models.detector.detector"], [52, "module-graphnet.models.detector.icecube"], [53, "module-graphnet.models.detector.prometheus"], [54, "module-graphnet.models.gnn"], [55, "module-graphnet.models.gnn.convnet"], [56, "module-graphnet.models.gnn.dynedge"], [57, "module-graphnet.models.gnn.dynedge_jinst"], [58, "module-graphnet.models.gnn.dynedge_kaggle_tito"], [59, "module-graphnet.models.gnn.gnn"], [60, "module-graphnet.models.graphs"], [61, "module-graphnet.models.graphs.edges"], [62, "module-graphnet.models.graphs.edges.edges"], [63, "module-graphnet.models.graphs.edges.minkowski"], [64, 
"module-graphnet.models.graphs.graph_definition"], [65, "module-graphnet.models.graphs.graphs"], [66, "module-graphnet.models.graphs.nodes"], [67, "module-graphnet.models.graphs.nodes.nodes"], [68, "module-graphnet.models.graphs.utils"], [69, "module-graphnet.models.model"], [70, "module-graphnet.models.standard_model"], [71, "module-graphnet.models.task"], [72, "module-graphnet.models.task.classification"], [73, "module-graphnet.models.task.reconstruction"], [74, "module-graphnet.models.task.task"], [75, "module-graphnet.models.utils"], [76, "module-graphnet.pisa"], [77, "module-graphnet.pisa.fitting"], [78, "module-graphnet.pisa.plotting"], [79, "module-graphnet.training"], [80, "module-graphnet.training.callbacks"], [81, "module-graphnet.training.labels"], [82, "module-graphnet.training.loss_functions"], [83, "module-graphnet.training.utils"], [84, "module-graphnet.training.weight_fitting"], [85, "module-graphnet.utilities"], [86, "module-graphnet.utilities.argparse"], [87, "module-graphnet.utilities.config"], [88, "module-graphnet.utilities.config.base_config"], [89, "module-graphnet.utilities.config.configurable"], [90, "module-graphnet.utilities.config.dataset_config"], [91, "module-graphnet.utilities.config.model_config"], [92, "module-graphnet.utilities.config.parsing"], [93, "module-graphnet.utilities.config.training_config"], [94, "module-graphnet.utilities.decorators"], [95, "module-graphnet.utilities.deprecation_tools"], [96, "module-graphnet.utilities.filesys"], [97, "module-graphnet.utilities.imports"], [98, "module-graphnet.utilities.logging"], [99, "module-graphnet.utilities.maths"]], "graphnet.constants": [[2, "module-graphnet.constants"]], "graphnet.data": [[3, "module-graphnet.data"]], "deepcore (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.DEEPCORE"]], "deepcore (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.DEEPCORE"]], "features (class in graphnet.data.constants)": [[4, "graphnet.data.constants.FEATURES"]], "icecube86 (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.ICECUBE86"]], "icecube86 (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.ICECUBE86"]], "kaggle (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.KAGGLE"]], "kaggle (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.KAGGLE"]], "prometheus (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.PROMETHEUS"]], "prometheus (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.PROMETHEUS"]], "truth (class in graphnet.data.constants)": [[4, "graphnet.data.constants.TRUTH"]], "upgrade (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.UPGRADE"]], "upgrade (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.UPGRADE"]], "graphnet.data.constants": [[4, "module-graphnet.data.constants"]], "dataconverter (class in graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.DataConverter"]], "fileset (class in graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.FileSet"]], "cache_output_files() (in module graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.cache_output_files"]], "execute() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.execute"]], "file_suffix (graphnet.data.dataconverter.dataconverter property)": 
[[5, "graphnet.data.dataconverter.DataConverter.file_suffix"]], "gcd_file (graphnet.data.dataconverter.fileset attribute)": [[5, "graphnet.data.dataconverter.FileSet.gcd_file"]], "get_map_function() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.get_map_function"]], "graphnet.data.dataconverter": [[5, "module-graphnet.data.dataconverter"]], "i3_file (graphnet.data.dataconverter.fileset attribute)": [[5, "graphnet.data.dataconverter.FileSet.i3_file"]], "init_global_index() (in module graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.init_global_index"]], "merge_files() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.merge_files"]], "save_data() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.save_data"]], "dataloader (class in graphnet.data.dataloader)": [[6, "graphnet.data.dataloader.DataLoader"]], "collate_fn() (in module graphnet.data.dataloader)": [[6, "graphnet.data.dataloader.collate_fn"]], "do_shuffle() (in module graphnet.data.dataloader)": [[6, "graphnet.data.dataloader.do_shuffle"]], "from_dataset_config() (graphnet.data.dataloader.dataloader class method)": [[6, "graphnet.data.dataloader.DataLoader.from_dataset_config"]], "graphnet.data.dataloader": [[6, "module-graphnet.data.dataloader"]], "graphnet.data.dataset": [[7, "module-graphnet.data.dataset"]], "columnmissingexception": [[8, "graphnet.data.dataset.dataset.ColumnMissingException"]], "dataset (class in graphnet.data.dataset.dataset)": [[8, "graphnet.data.dataset.dataset.Dataset"]], "ensembledataset (class in graphnet.data.dataset.dataset)": [[8, "graphnet.data.dataset.dataset.EnsembleDataset"]], "add_label() (graphnet.data.dataset.dataset.dataset method)": [[8, "graphnet.data.dataset.dataset.Dataset.add_label"]], "concatenate() (graphnet.data.dataset.dataset.dataset class method)": [[8, "graphnet.data.dataset.dataset.Dataset.concatenate"]], "from_config() (graphnet.data.dataset.dataset.dataset class method)": [[8, "graphnet.data.dataset.dataset.Dataset.from_config"]], "graphnet.data.dataset.dataset": [[8, "module-graphnet.data.dataset.dataset"]], "load_module() (in module graphnet.data.dataset.dataset)": [[8, "graphnet.data.dataset.dataset.load_module"]], "parse_graph_definition() (in module graphnet.data.dataset.dataset)": [[8, "graphnet.data.dataset.dataset.parse_graph_definition"]], "path (graphnet.data.dataset.dataset.dataset property)": [[8, "graphnet.data.dataset.dataset.Dataset.path"]], "query_table() (graphnet.data.dataset.dataset.dataset method)": [[8, "graphnet.data.dataset.dataset.Dataset.query_table"]], "truth_table (graphnet.data.dataset.dataset.dataset property)": [[8, "graphnet.data.dataset.dataset.Dataset.truth_table"]], "graphnet.data.dataset.parquet": [[9, "module-graphnet.data.dataset.parquet"]], "parquetdataset (class in graphnet.data.dataset.parquet.parquet_dataset)": [[10, "graphnet.data.dataset.parquet.parquet_dataset.ParquetDataset"]], "graphnet.data.dataset.parquet.parquet_dataset": [[10, "module-graphnet.data.dataset.parquet.parquet_dataset"]], "query_table() (graphnet.data.dataset.parquet.parquet_dataset.parquetdataset method)": [[10, "graphnet.data.dataset.parquet.parquet_dataset.ParquetDataset.query_table"]], "graphnet.data.dataset.sqlite": [[11, "module-graphnet.data.dataset.sqlite"]], "sqlitedataset (class in graphnet.data.dataset.sqlite.sqlite_dataset)": [[12, 
"graphnet.data.dataset.sqlite.sqlite_dataset.SQLiteDataset"]], "graphnet.data.dataset.sqlite.sqlite_dataset": [[12, "module-graphnet.data.dataset.sqlite.sqlite_dataset"]], "query_table() (graphnet.data.dataset.sqlite.sqlite_dataset.sqlitedataset method)": [[12, "graphnet.data.dataset.sqlite.sqlite_dataset.SQLiteDataset.query_table"]], "graphnet.data.extractors": [[13, "module-graphnet.data.extractors"]], "i3extractor (class in graphnet.data.extractors.i3extractor)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor"]], "i3extractorcollection (class in graphnet.data.extractors.i3extractor)": [[14, "graphnet.data.extractors.i3extractor.I3ExtractorCollection"]], "graphnet.data.extractors.i3extractor": [[14, "module-graphnet.data.extractors.i3extractor"]], "name (graphnet.data.extractors.i3extractor.i3extractor property)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor.name"]], "set_files() (graphnet.data.extractors.i3extractor.i3extractor method)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor.set_files"]], "set_files() (graphnet.data.extractors.i3extractor.i3extractorcollection method)": [[14, "graphnet.data.extractors.i3extractor.I3ExtractorCollection.set_files"]], "i3featureextractor (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractor"]], "i3featureextractoricecube86 (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCube86"]], "i3featureextractoricecubedeepcore (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCubeDeepCore"]], "i3featureextractoricecubeupgrade (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCubeUpgrade"]], "i3pulsenoisetruthflagicecubeupgrade (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3PulseNoiseTruthFlagIceCubeUpgrade"]], "graphnet.data.extractors.i3featureextractor": [[15, "module-graphnet.data.extractors.i3featureextractor"]], "i3genericextractor (class in graphnet.data.extractors.i3genericextractor)": [[16, "graphnet.data.extractors.i3genericextractor.I3GenericExtractor"]], "graphnet.data.extractors.i3genericextractor": [[16, "module-graphnet.data.extractors.i3genericextractor"]], "i3galacticplanehybridrecoextractor (class in graphnet.data.extractors.i3hybridrecoextractor)": [[17, "graphnet.data.extractors.i3hybridrecoextractor.I3GalacticPlaneHybridRecoExtractor"]], "graphnet.data.extractors.i3hybridrecoextractor": [[17, "module-graphnet.data.extractors.i3hybridrecoextractor"]], "i3ntmuonlabelextractor (class in graphnet.data.extractors.i3ntmuonlabelsextractor)": [[18, "graphnet.data.extractors.i3ntmuonlabelsextractor.I3NTMuonLabelExtractor"]], "graphnet.data.extractors.i3ntmuonlabelsextractor": [[18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"]], "i3particleextractor (class in graphnet.data.extractors.i3particleextractor)": [[19, "graphnet.data.extractors.i3particleextractor.I3ParticleExtractor"]], "graphnet.data.extractors.i3particleextractor": [[19, "module-graphnet.data.extractors.i3particleextractor"]], "i3pisaextractor (class in graphnet.data.extractors.i3pisaextractor)": [[20, "graphnet.data.extractors.i3pisaextractor.I3PISAExtractor"]], "graphnet.data.extractors.i3pisaextractor": [[20, "module-graphnet.data.extractors.i3pisaextractor"]], 
"i3quesoextractor (class in graphnet.data.extractors.i3quesoextractor)": [[21, "graphnet.data.extractors.i3quesoextractor.I3QUESOExtractor"]], "graphnet.data.extractors.i3quesoextractor": [[21, "module-graphnet.data.extractors.i3quesoextractor"]], "i3retroextractor (class in graphnet.data.extractors.i3retroextractor)": [[22, "graphnet.data.extractors.i3retroextractor.I3RetroExtractor"]], "graphnet.data.extractors.i3retroextractor": [[22, "module-graphnet.data.extractors.i3retroextractor"]], "i3splinempeicextractor (class in graphnet.data.extractors.i3splinempeextractor)": [[23, "graphnet.data.extractors.i3splinempeextractor.I3SplineMPEICExtractor"]], "graphnet.data.extractors.i3splinempeextractor": [[23, "module-graphnet.data.extractors.i3splinempeextractor"]], "i3truthextractor (class in graphnet.data.extractors.i3truthextractor)": [[24, "graphnet.data.extractors.i3truthextractor.I3TruthExtractor"]], "graphnet.data.extractors.i3truthextractor": [[24, "module-graphnet.data.extractors.i3truthextractor"]], "i3tumextractor (class in graphnet.data.extractors.i3tumextractor)": [[25, "graphnet.data.extractors.i3tumextractor.I3TUMExtractor"]], "graphnet.data.extractors.i3tumextractor": [[25, "module-graphnet.data.extractors.i3tumextractor"]], "graphnet.data.extractors.utilities": [[26, "module-graphnet.data.extractors.utilities"]], "flatten_nested_dictionary() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.flatten_nested_dictionary"]], "graphnet.data.extractors.utilities.collections": [[27, "module-graphnet.data.extractors.utilities.collections"]], "serialise() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.serialise"]], "transpose_list_of_dicts() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.transpose_list_of_dicts"]], "frame_is_montecarlo() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.frame_is_montecarlo"]], "frame_is_noise() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.frame_is_noise"]], "get_om_keys_and_pulseseries() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.get_om_keys_and_pulseseries"]], "graphnet.data.extractors.utilities.frames": [[28, "module-graphnet.data.extractors.utilities.frames"]], "break_cyclic_recursion() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.break_cyclic_recursion"]], "cast_object_to_pure_python() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.cast_object_to_pure_python"]], "cast_pulse_series_to_pure_python() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.cast_pulse_series_to_pure_python"]], "get_member_variables() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.get_member_variables"]], "graphnet.data.extractors.utilities.types": [[29, "module-graphnet.data.extractors.utilities.types"]], "is_boost_class() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_boost_class"]], "is_boost_enum() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_boost_enum"]], 
"is_icecube_class() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_icecube_class"]], "is_method() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_method"]], "is_type() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_type"]], "i3filter (class in graphnet.data.filters)": [[30, "graphnet.data.filters.I3Filter"]], "i3filtermask (class in graphnet.data.filters)": [[30, "graphnet.data.filters.I3FilterMask"]], "nullspliti3filter (class in graphnet.data.filters)": [[30, "graphnet.data.filters.NullSplitI3Filter"]], "graphnet.data.filters": [[30, "module-graphnet.data.filters"]], "graphnet.data.parquet": [[31, "module-graphnet.data.parquet"]], "parquetdataconverter (class in graphnet.data.parquet.parquet_dataconverter)": [[32, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter"]], "file_suffix (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter attribute)": [[32, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.file_suffix"]], "graphnet.data.parquet.parquet_dataconverter": [[32, "module-graphnet.data.parquet.parquet_dataconverter"]], "merge_files() (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter method)": [[32, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.merge_files"]], "save_data() (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter method)": [[32, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.save_data"]], "insqlitepipeline (class in graphnet.data.pipeline)": [[33, "graphnet.data.pipeline.InSQLitePipeline"]], "graphnet.data.pipeline": [[33, "module-graphnet.data.pipeline"]], "graphnet.data.sqlite": [[34, "module-graphnet.data.sqlite"]], "sqlitedataconverter (class in graphnet.data.sqlite.sqlite_dataconverter)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter"]], "any_pulsemap_is_non_empty() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.any_pulsemap_is_non_empty"]], "construct_dataframe() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.construct_dataframe"]], "file_suffix (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter attribute)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.file_suffix"]], "graphnet.data.sqlite.sqlite_dataconverter": [[35, "module-graphnet.data.sqlite.sqlite_dataconverter"]], "is_mc_tree() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.is_mc_tree"]], "is_pulse_map() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.is_pulse_map"]], "merge_files() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.merge_files"]], "save_data() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.save_data"]], "attach_index() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.attach_index"]], "create_table() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.create_table"]], "create_table_and_save_to_sql() (in module 
graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.create_table_and_save_to_sql"]], "database_exists() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.database_exists"]], "database_table_exists() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.database_table_exists"]], "graphnet.data.sqlite.sqlite_utilities": [[36, "module-graphnet.data.sqlite.sqlite_utilities"]], "run_sql_code() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.run_sql_code"]], "save_to_sql() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.save_to_sql"]], "graphnet.data.utilities": [[37, "module-graphnet.data.utilities"]], "parquettosqliteconverter (class in graphnet.data.utilities.parquet_to_sqlite)": [[38, "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter"]], "graphnet.data.utilities.parquet_to_sqlite": [[38, "module-graphnet.data.utilities.parquet_to_sqlite"]], "run() (graphnet.data.utilities.parquet_to_sqlite.parquettosqliteconverter method)": [[38, "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter.run"]], "graphnet.data.utilities.random": [[39, "module-graphnet.data.utilities.random"]], "pairwise_shuffle() (in module graphnet.data.utilities.random)": [[39, "graphnet.data.utilities.random.pairwise_shuffle"]], "stringselectionresolver (class in graphnet.data.utilities.string_selection_resolver)": [[40, "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver"]], "graphnet.data.utilities.string_selection_resolver": [[40, "module-graphnet.data.utilities.string_selection_resolver"]], "resolve() (graphnet.data.utilities.string_selection_resolver.stringselectionresolver method)": [[40, "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver.resolve"]], "graphnet.deployment": [[41, "module-graphnet.deployment"]], "graphneti3module (class in graphnet.deployment.i3modules.graphnet_module)": [[44, "graphnet.deployment.i3modules.graphnet_module.GraphNeTI3Module"]], "i3inferencemodule (class in graphnet.deployment.i3modules.graphnet_module)": [[44, "graphnet.deployment.i3modules.graphnet_module.I3InferenceModule"]], "i3pulsecleanermodule (class in graphnet.deployment.i3modules.graphnet_module)": [[44, "graphnet.deployment.i3modules.graphnet_module.I3PulseCleanerModule"]], "graphnet.deployment.i3modules.graphnet_module": [[44, "module-graphnet.deployment.i3modules.graphnet_module"]], "graphnet.models": [[45, "module-graphnet.models"]], "attributecoarsening (class in graphnet.models.coarsening)": [[46, "graphnet.models.coarsening.AttributeCoarsening"]], "coarsening (class in graphnet.models.coarsening)": [[46, "graphnet.models.coarsening.Coarsening"]], "customdomcoarsening (class in graphnet.models.coarsening)": [[46, "graphnet.models.coarsening.CustomDOMCoarsening"]], "domandtimewindowcoarsening (class in graphnet.models.coarsening)": [[46, "graphnet.models.coarsening.DOMAndTimeWindowCoarsening"]], "domcoarsening (class in graphnet.models.coarsening)": [[46, "graphnet.models.coarsening.DOMCoarsening"]], "forward() (graphnet.models.coarsening.coarsening method)": [[46, "graphnet.models.coarsening.Coarsening.forward"]], "graphnet.models.coarsening": [[46, "module-graphnet.models.coarsening"]], "reduce_options (graphnet.models.coarsening.coarsening attribute)": [[46, "graphnet.models.coarsening.Coarsening.reduce_options"]], 
"unbatch_edge_index() (in module graphnet.models.coarsening)": [[46, "graphnet.models.coarsening.unbatch_edge_index"]], "graphnet.models.components": [[47, "module-graphnet.models.components"]], "dynedgeconv (class in graphnet.models.components.layers)": [[48, "graphnet.models.components.layers.DynEdgeConv"]], "dyntrans (class in graphnet.models.components.layers)": [[48, "graphnet.models.components.layers.DynTrans"]], "edgeconvtito (class in graphnet.models.components.layers)": [[48, "graphnet.models.components.layers.EdgeConvTito"]], "forward() (graphnet.models.components.layers.dynedgeconv method)": [[48, "graphnet.models.components.layers.DynEdgeConv.forward"]], "forward() (graphnet.models.components.layers.dyntrans method)": [[48, "graphnet.models.components.layers.DynTrans.forward"]], "forward() (graphnet.models.components.layers.edgeconvtito method)": [[48, "graphnet.models.components.layers.EdgeConvTito.forward"]], "graphnet.models.components.layers": [[48, "module-graphnet.models.components.layers"]], "message() (graphnet.models.components.layers.edgeconvtito method)": [[48, "graphnet.models.components.layers.EdgeConvTito.message"]], "reset_parameters() (graphnet.models.components.layers.edgeconvtito method)": [[48, "graphnet.models.components.layers.EdgeConvTito.reset_parameters"]], "graphnet.models.components.pool": [[49, "module-graphnet.models.components.pool"]], "group_by() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.group_by"]], "group_pulses_to_dom() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.group_pulses_to_dom"]], "group_pulses_to_pmt() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.group_pulses_to_pmt"]], "min_pool() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.min_pool"]], "min_pool_x() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.min_pool_x"]], "std_pool() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.std_pool"]], "std_pool_x() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.std_pool_x"]], "sum_pool() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.sum_pool"]], "sum_pool_and_distribute() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.sum_pool_and_distribute"]], "sum_pool_x() (in module graphnet.models.components.pool)": [[49, "graphnet.models.components.pool.sum_pool_x"]], "graphnet.models.detector": [[50, "module-graphnet.models.detector"]], "detector (class in graphnet.models.detector.detector)": [[51, "graphnet.models.detector.detector.Detector"]], "feature_map() (graphnet.models.detector.detector.detector method)": [[51, "graphnet.models.detector.detector.Detector.feature_map"]], "forward() (graphnet.models.detector.detector.detector method)": [[51, "graphnet.models.detector.detector.Detector.forward"]], "geometry_table (graphnet.models.detector.detector.detector property)": [[51, "graphnet.models.detector.detector.Detector.geometry_table"]], "graphnet.models.detector.detector": [[51, "module-graphnet.models.detector.detector"]], "sensor_index_name (graphnet.models.detector.detector.detector property)": [[51, "graphnet.models.detector.detector.Detector.sensor_index_name"]], "sensor_position_names (graphnet.models.detector.detector.detector property)": [[51, 
"graphnet.models.detector.detector.Detector.sensor_position_names"]], "string_index_name (graphnet.models.detector.detector.detector property)": [[51, "graphnet.models.detector.detector.Detector.string_index_name"]], "icecube86 (class in graphnet.models.detector.icecube)": [[52, "graphnet.models.detector.icecube.IceCube86"]], "icecubedeepcore (class in graphnet.models.detector.icecube)": [[52, "graphnet.models.detector.icecube.IceCubeDeepCore"]], "icecubekaggle (class in graphnet.models.detector.icecube)": [[52, "graphnet.models.detector.icecube.IceCubeKaggle"]], "icecubeupgrade (class in graphnet.models.detector.icecube)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade"]], "feature_map() (graphnet.models.detector.icecube.icecube86 method)": [[52, "graphnet.models.detector.icecube.IceCube86.feature_map"]], "feature_map() (graphnet.models.detector.icecube.icecubedeepcore method)": [[52, "graphnet.models.detector.icecube.IceCubeDeepCore.feature_map"]], "feature_map() (graphnet.models.detector.icecube.icecubekaggle method)": [[52, "graphnet.models.detector.icecube.IceCubeKaggle.feature_map"]], "feature_map() (graphnet.models.detector.icecube.icecubeupgrade method)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.feature_map"]], "geometry_table_path (graphnet.models.detector.icecube.icecube86 attribute)": [[52, "graphnet.models.detector.icecube.IceCube86.geometry_table_path"]], "geometry_table_path (graphnet.models.detector.icecube.icecubeupgrade attribute)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.geometry_table_path"]], "graphnet.models.detector.icecube": [[52, "module-graphnet.models.detector.icecube"]], "sensor_id_column (graphnet.models.detector.icecube.icecube86 attribute)": [[52, "graphnet.models.detector.icecube.IceCube86.sensor_id_column"]], "sensor_id_column (graphnet.models.detector.icecube.icecubeupgrade attribute)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.sensor_id_column"]], "string_id_column (graphnet.models.detector.icecube.icecube86 attribute)": [[52, "graphnet.models.detector.icecube.IceCube86.string_id_column"]], "string_id_column (graphnet.models.detector.icecube.icecubeupgrade attribute)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.string_id_column"]], "xyz (graphnet.models.detector.icecube.icecube86 attribute)": [[52, "graphnet.models.detector.icecube.IceCube86.xyz"]], "xyz (graphnet.models.detector.icecube.icecubeupgrade attribute)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.xyz"]], "orca150 (class in graphnet.models.detector.prometheus)": [[53, "graphnet.models.detector.prometheus.ORCA150"]], "prometheus (class in graphnet.models.detector.prometheus)": [[53, "graphnet.models.detector.prometheus.Prometheus"]], "feature_map() (graphnet.models.detector.prometheus.orca150 method)": [[53, "graphnet.models.detector.prometheus.ORCA150.feature_map"]], "geometry_table_path (graphnet.models.detector.prometheus.orca150 attribute)": [[53, "graphnet.models.detector.prometheus.ORCA150.geometry_table_path"]], "graphnet.models.detector.prometheus": [[53, "module-graphnet.models.detector.prometheus"]], "sensor_id_column (graphnet.models.detector.prometheus.orca150 attribute)": [[53, "graphnet.models.detector.prometheus.ORCA150.sensor_id_column"]], "string_id_column (graphnet.models.detector.prometheus.orca150 attribute)": [[53, "graphnet.models.detector.prometheus.ORCA150.string_id_column"]], "xyz (graphnet.models.detector.prometheus.orca150 attribute)": [[53, "graphnet.models.detector.prometheus.ORCA150.xyz"]], 
"graphnet.models.gnn": [[54, "module-graphnet.models.gnn"]], "convnet (class in graphnet.models.gnn.convnet)": [[55, "graphnet.models.gnn.convnet.ConvNet"]], "forward() (graphnet.models.gnn.convnet.convnet method)": [[55, "graphnet.models.gnn.convnet.ConvNet.forward"]], "graphnet.models.gnn.convnet": [[55, "module-graphnet.models.gnn.convnet"]], "dynedge (class in graphnet.models.gnn.dynedge)": [[56, "graphnet.models.gnn.dynedge.DynEdge"]], "forward() (graphnet.models.gnn.dynedge.dynedge method)": [[56, "graphnet.models.gnn.dynedge.DynEdge.forward"]], "graphnet.models.gnn.dynedge": [[56, "module-graphnet.models.gnn.dynedge"]], "dynedgejinst (class in graphnet.models.gnn.dynedge_jinst)": [[57, "graphnet.models.gnn.dynedge_jinst.DynEdgeJINST"]], "forward() (graphnet.models.gnn.dynedge_jinst.dynedgejinst method)": [[57, "graphnet.models.gnn.dynedge_jinst.DynEdgeJINST.forward"]], "graphnet.models.gnn.dynedge_jinst": [[57, "module-graphnet.models.gnn.dynedge_jinst"]], "dynedgetito (class in graphnet.models.gnn.dynedge_kaggle_tito)": [[58, "graphnet.models.gnn.dynedge_kaggle_tito.DynEdgeTITO"]], "forward() (graphnet.models.gnn.dynedge_kaggle_tito.dynedgetito method)": [[58, "graphnet.models.gnn.dynedge_kaggle_tito.DynEdgeTITO.forward"]], "graphnet.models.gnn.dynedge_kaggle_tito": [[58, "module-graphnet.models.gnn.dynedge_kaggle_tito"]], "gnn (class in graphnet.models.gnn.gnn)": [[59, "graphnet.models.gnn.gnn.GNN"]], "forward() (graphnet.models.gnn.gnn.gnn method)": [[59, "graphnet.models.gnn.gnn.GNN.forward"]], "graphnet.models.gnn.gnn": [[59, "module-graphnet.models.gnn.gnn"]], "nb_inputs (graphnet.models.gnn.gnn.gnn property)": [[59, "graphnet.models.gnn.gnn.GNN.nb_inputs"]], "nb_outputs (graphnet.models.gnn.gnn.gnn property)": [[59, "graphnet.models.gnn.gnn.GNN.nb_outputs"]], "graphnet.models.graphs": [[60, "module-graphnet.models.graphs"]], "graphnet.models.graphs.edges": [[61, "module-graphnet.models.graphs.edges"]], "edgedefinition (class in graphnet.models.graphs.edges.edges)": [[62, "graphnet.models.graphs.edges.edges.EdgeDefinition"]], "euclideanedges (class in graphnet.models.graphs.edges.edges)": [[62, "graphnet.models.graphs.edges.edges.EuclideanEdges"]], "knnedges (class in graphnet.models.graphs.edges.edges)": [[62, "graphnet.models.graphs.edges.edges.KNNEdges"]], "radialedges (class in graphnet.models.graphs.edges.edges)": [[62, "graphnet.models.graphs.edges.edges.RadialEdges"]], "forward() (graphnet.models.graphs.edges.edges.edgedefinition method)": [[62, "graphnet.models.graphs.edges.edges.EdgeDefinition.forward"]], "graphnet.models.graphs.edges.edges": [[62, "module-graphnet.models.graphs.edges.edges"]], "minkowskiknnedges (class in graphnet.models.graphs.edges.minkowski)": [[63, "graphnet.models.graphs.edges.minkowski.MinkowskiKNNEdges"]], "compute_minkowski_distance_mat() (in module graphnet.models.graphs.edges.minkowski)": [[63, "graphnet.models.graphs.edges.minkowski.compute_minkowski_distance_mat"]], "graphnet.models.graphs.edges.minkowski": [[63, "module-graphnet.models.graphs.edges.minkowski"]], "graphdefinition (class in graphnet.models.graphs.graph_definition)": [[64, "graphnet.models.graphs.graph_definition.GraphDefinition"]], "forward() (graphnet.models.graphs.graph_definition.graphdefinition method)": [[64, "graphnet.models.graphs.graph_definition.GraphDefinition.forward"]], "graphnet.models.graphs.graph_definition": [[64, "module-graphnet.models.graphs.graph_definition"]], "knngraph (class in graphnet.models.graphs.graphs)": [[65, 
"graphnet.models.graphs.graphs.KNNGraph"]], "graphnet.models.graphs.graphs": [[65, "module-graphnet.models.graphs.graphs"]], "graphnet.models.graphs.nodes": [[66, "module-graphnet.models.graphs.nodes"]], "nodedefinition (class in graphnet.models.graphs.nodes.nodes)": [[67, "graphnet.models.graphs.nodes.nodes.NodeDefinition"]], "nodesaspulses (class in graphnet.models.graphs.nodes.nodes)": [[67, "graphnet.models.graphs.nodes.nodes.NodesAsPulses"]], "percentileclusters (class in graphnet.models.graphs.nodes.nodes)": [[67, "graphnet.models.graphs.nodes.nodes.PercentileClusters"]], "forward() (graphnet.models.graphs.nodes.nodes.nodedefinition method)": [[67, "graphnet.models.graphs.nodes.nodes.NodeDefinition.forward"]], "graphnet.models.graphs.nodes.nodes": [[67, "module-graphnet.models.graphs.nodes.nodes"]], "nb_outputs (graphnet.models.graphs.nodes.nodes.nodedefinition property)": [[67, "graphnet.models.graphs.nodes.nodes.NodeDefinition.nb_outputs"]], "set_number_of_inputs() (graphnet.models.graphs.nodes.nodes.nodedefinition method)": [[67, "graphnet.models.graphs.nodes.nodes.NodeDefinition.set_number_of_inputs"]], "set_output_feature_names() (graphnet.models.graphs.nodes.nodes.nodedefinition method)": [[67, "graphnet.models.graphs.nodes.nodes.NodeDefinition.set_output_feature_names"]], "cluster_summarize_with_percentiles() (in module graphnet.models.graphs.utils)": [[68, "graphnet.models.graphs.utils.cluster_summarize_with_percentiles"]], "gather_cluster_sequence() (in module graphnet.models.graphs.utils)": [[68, "graphnet.models.graphs.utils.gather_cluster_sequence"]], "graphnet.models.graphs.utils": [[68, "module-graphnet.models.graphs.utils"]], "identify_indices() (in module graphnet.models.graphs.utils)": [[68, "graphnet.models.graphs.utils.identify_indices"]], "lex_sort() (in module graphnet.models.graphs.utils)": [[68, "graphnet.models.graphs.utils.lex_sort"]], "model (class in graphnet.models.model)": [[69, "graphnet.models.model.Model"]], "from_config() (graphnet.models.model.model class method)": [[69, "graphnet.models.model.Model.from_config"]], "graphnet.models.model": [[69, "module-graphnet.models.model"]], "load() (graphnet.models.model.model class method)": [[69, "graphnet.models.model.Model.load"]], "load_state_dict() (graphnet.models.model.model method)": [[69, "graphnet.models.model.Model.load_state_dict"]], "save() (graphnet.models.model.model method)": [[69, "graphnet.models.model.Model.save"]], "save_state_dict() (graphnet.models.model.model method)": [[69, "graphnet.models.model.Model.save_state_dict"]], "standardmodel (class in graphnet.models.standard_model)": [[70, "graphnet.models.standard_model.StandardModel"]], "compute_loss() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.compute_loss"]], "configure_optimizers() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.configure_optimizers"]], "fit() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.fit"]], "forward() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.forward"]], "graphnet.models.standard_model": [[70, "module-graphnet.models.standard_model"]], "inference() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.inference"]], "predict() (graphnet.models.standard_model.standardmodel method)": [[70, 
"graphnet.models.standard_model.StandardModel.predict"]], "predict_as_dataframe() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.predict_as_dataframe"]], "prediction_labels (graphnet.models.standard_model.standardmodel property)": [[70, "graphnet.models.standard_model.StandardModel.prediction_labels"]], "shared_step() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.shared_step"]], "target_labels (graphnet.models.standard_model.standardmodel property)": [[70, "graphnet.models.standard_model.StandardModel.target_labels"]], "train() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.train"]], "training_step() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.training_step"]], "validation_step() (graphnet.models.standard_model.standardmodel method)": [[70, "graphnet.models.standard_model.StandardModel.validation_step"]], "graphnet.models.task": [[71, "module-graphnet.models.task"]], "binaryclassificationtask (class in graphnet.models.task.classification)": [[72, "graphnet.models.task.classification.BinaryClassificationTask"]], "binaryclassificationtasklogits (class in graphnet.models.task.classification)": [[72, "graphnet.models.task.classification.BinaryClassificationTaskLogits"]], "multiclassclassificationtask (class in graphnet.models.task.classification)": [[72, "graphnet.models.task.classification.MulticlassClassificationTask"]], "default_prediction_labels (graphnet.models.task.classification.binaryclassificationtask attribute)": [[72, "graphnet.models.task.classification.BinaryClassificationTask.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.classification.binaryclassificationtasklogits attribute)": [[72, "graphnet.models.task.classification.BinaryClassificationTaskLogits.default_prediction_labels"]], "default_target_labels (graphnet.models.task.classification.binaryclassificationtask attribute)": [[72, "graphnet.models.task.classification.BinaryClassificationTask.default_target_labels"]], "default_target_labels (graphnet.models.task.classification.binaryclassificationtasklogits attribute)": [[72, "graphnet.models.task.classification.BinaryClassificationTaskLogits.default_target_labels"]], "graphnet.models.task.classification": [[72, "module-graphnet.models.task.classification"]], "nb_inputs (graphnet.models.task.classification.binaryclassificationtask attribute)": [[72, "graphnet.models.task.classification.BinaryClassificationTask.nb_inputs"]], "nb_inputs (graphnet.models.task.classification.binaryclassificationtasklogits attribute)": [[72, "graphnet.models.task.classification.BinaryClassificationTaskLogits.nb_inputs"]], "azimuthreconstruction (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.AzimuthReconstruction"]], "azimuthreconstructionwithkappa (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa"]], "directionreconstructionwithkappa (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa"]], "energyreconstruction (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.EnergyReconstruction"]], "energyreconstructionwithpower (class in graphnet.models.task.reconstruction)": [[73, 
"graphnet.models.task.reconstruction.EnergyReconstructionWithPower"]], "energyreconstructionwithuncertainty (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty"]], "energytcreconstruction (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.EnergyTCReconstruction"]], "inelasticityreconstruction (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.InelasticityReconstruction"]], "positionreconstruction (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.PositionReconstruction"]], "timereconstruction (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.TimeReconstruction"]], "vertexreconstruction (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.VertexReconstruction"]], "zenithreconstruction (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.ZenithReconstruction"]], "zenithreconstructionwithkappa (class in graphnet.models.task.reconstruction)": [[73, "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa"]], "default_prediction_labels (graphnet.models.task.reconstruction.azimuthreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.AzimuthReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.azimuthreconstructionwithkappa attribute)": [[73, "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.directionreconstructionwithkappa attribute)": [[73, "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.energyreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.EnergyReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.energyreconstructionwithpower attribute)": [[73, "graphnet.models.task.reconstruction.EnergyReconstructionWithPower.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.energyreconstructionwithuncertainty attribute)": [[73, "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.energytcreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.EnergyTCReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.inelasticityreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.InelasticityReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.positionreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.PositionReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.timereconstruction attribute)": [[73, "graphnet.models.task.reconstruction.TimeReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.vertexreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.VertexReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.zenithreconstruction 
attribute)": [[73, "graphnet.models.task.reconstruction.ZenithReconstruction.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.reconstruction.zenithreconstructionwithkappa attribute)": [[73, "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa.default_prediction_labels"]], "default_target_labels (graphnet.models.task.reconstruction.azimuthreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.AzimuthReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.azimuthreconstructionwithkappa attribute)": [[73, "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.directionreconstructionwithkappa attribute)": [[73, "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.energyreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.EnergyReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.energyreconstructionwithpower attribute)": [[73, "graphnet.models.task.reconstruction.EnergyReconstructionWithPower.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.energyreconstructionwithuncertainty attribute)": [[73, "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.energytcreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.EnergyTCReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.inelasticityreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.InelasticityReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.positionreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.PositionReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.timereconstruction attribute)": [[73, "graphnet.models.task.reconstruction.TimeReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.vertexreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.VertexReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.zenithreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.ZenithReconstruction.default_target_labels"]], "default_target_labels (graphnet.models.task.reconstruction.zenithreconstructionwithkappa attribute)": [[73, "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa.default_target_labels"]], "graphnet.models.task.reconstruction": [[73, "module-graphnet.models.task.reconstruction"]], "nb_inputs (graphnet.models.task.reconstruction.azimuthreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.AzimuthReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.azimuthreconstructionwithkappa attribute)": [[73, "graphnet.models.task.reconstruction.AzimuthReconstructionWithKappa.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.directionreconstructionwithkappa attribute)": [[73, "graphnet.models.task.reconstruction.DirectionReconstructionWithKappa.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.energyreconstruction attribute)": [[73, 
"graphnet.models.task.reconstruction.EnergyReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.energyreconstructionwithpower attribute)": [[73, "graphnet.models.task.reconstruction.EnergyReconstructionWithPower.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.energyreconstructionwithuncertainty attribute)": [[73, "graphnet.models.task.reconstruction.EnergyReconstructionWithUncertainty.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.energytcreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.EnergyTCReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.inelasticityreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.InelasticityReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.positionreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.PositionReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.timereconstruction attribute)": [[73, "graphnet.models.task.reconstruction.TimeReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.vertexreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.VertexReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.zenithreconstruction attribute)": [[73, "graphnet.models.task.reconstruction.ZenithReconstruction.nb_inputs"]], "nb_inputs (graphnet.models.task.reconstruction.zenithreconstructionwithkappa attribute)": [[73, "graphnet.models.task.reconstruction.ZenithReconstructionWithKappa.nb_inputs"]], "identitytask (class in graphnet.models.task.task)": [[74, "graphnet.models.task.task.IdentityTask"]], "learnedtask (class in graphnet.models.task.task)": [[74, "graphnet.models.task.task.LearnedTask"]], "standardflowtask (class in graphnet.models.task.task)": [[74, "graphnet.models.task.task.StandardFlowTask"]], "standardlearnedtask (class in graphnet.models.task.task)": [[74, "graphnet.models.task.task.StandardLearnedTask"]], "task (class in graphnet.models.task.task)": [[74, "graphnet.models.task.task.Task"]], "compute_loss() (graphnet.models.task.task.learnedtask method)": [[74, "graphnet.models.task.task.LearnedTask.compute_loss"]], "compute_loss() (graphnet.models.task.task.standardflowtask method)": [[74, "graphnet.models.task.task.StandardFlowTask.compute_loss"]], "compute_loss() (graphnet.models.task.task.standardlearnedtask method)": [[74, "graphnet.models.task.task.StandardLearnedTask.compute_loss"]], "default_prediction_labels (graphnet.models.task.task.identitytask property)": [[74, "graphnet.models.task.task.IdentityTask.default_prediction_labels"]], "default_prediction_labels (graphnet.models.task.task.task property)": [[74, "graphnet.models.task.task.Task.default_prediction_labels"]], "default_target_labels (graphnet.models.task.task.identitytask property)": [[74, "graphnet.models.task.task.IdentityTask.default_target_labels"]], "default_target_labels (graphnet.models.task.task.task property)": [[74, "graphnet.models.task.task.Task.default_target_labels"]], "forward() (graphnet.models.task.task.learnedtask method)": [[74, "graphnet.models.task.task.LearnedTask.forward"]], "forward() (graphnet.models.task.task.standardflowtask method)": [[74, "graphnet.models.task.task.StandardFlowTask.forward"]], "graphnet.models.task.task": [[74, "module-graphnet.models.task.task"]], "inference() (graphnet.models.task.task.task method)": [[74, "graphnet.models.task.task.Task.inference"]], "nb_inputs 
(graphnet.models.task.task.identitytask property)": [[74, "graphnet.models.task.task.IdentityTask.nb_inputs"]], "nb_inputs (graphnet.models.task.task.learnedtask property)": [[74, "graphnet.models.task.task.LearnedTask.nb_inputs"]], "nb_inputs (graphnet.models.task.task.standardlearnedtask property)": [[74, "graphnet.models.task.task.StandardLearnedTask.nb_inputs"]], "nb_inputs (graphnet.models.task.task.task property)": [[74, "graphnet.models.task.task.Task.nb_inputs"]], "nb_inputs() (graphnet.models.task.task.standardflowtask method)": [[74, "graphnet.models.task.task.StandardFlowTask.nb_inputs"]], "train_eval() (graphnet.models.task.task.task method)": [[74, "graphnet.models.task.task.Task.train_eval"]], "calculate_distance_matrix() (in module graphnet.models.utils)": [[75, "graphnet.models.utils.calculate_distance_matrix"]], "calculate_xyzt_homophily() (in module graphnet.models.utils)": [[75, "graphnet.models.utils.calculate_xyzt_homophily"]], "graphnet.models.utils": [[75, "module-graphnet.models.utils"]], "knn_graph_batch() (in module graphnet.models.utils)": [[75, "graphnet.models.utils.knn_graph_batch"]], "graphnet.pisa": [[76, "module-graphnet.pisa"]], "contourfitter (class in graphnet.pisa.fitting)": [[77, "graphnet.pisa.fitting.ContourFitter"]], "weightfitter (class in graphnet.pisa.fitting)": [[77, "graphnet.pisa.fitting.WeightFitter"]], "config_updater() (in module graphnet.pisa.fitting)": [[77, "graphnet.pisa.fitting.config_updater"]], "fit_1d_contour() (graphnet.pisa.fitting.contourfitter method)": [[77, "graphnet.pisa.fitting.ContourFitter.fit_1d_contour"]], "fit_2d_contour() (graphnet.pisa.fitting.contourfitter method)": [[77, "graphnet.pisa.fitting.ContourFitter.fit_2d_contour"]], "fit_weights() (graphnet.pisa.fitting.weightfitter method)": [[77, "graphnet.pisa.fitting.WeightFitter.fit_weights"]], "graphnet.pisa.fitting": [[77, "module-graphnet.pisa.fitting"]], "graphnet.pisa.plotting": [[78, "module-graphnet.pisa.plotting"]], "plot_1d_contour() (in module graphnet.pisa.plotting)": [[78, "graphnet.pisa.plotting.plot_1D_contour"]], "plot_2d_contour() (in module graphnet.pisa.plotting)": [[78, "graphnet.pisa.plotting.plot_2D_contour"]], "read_entry() (in module graphnet.pisa.plotting)": [[78, "graphnet.pisa.plotting.read_entry"]], "graphnet.training": [[79, "module-graphnet.training"]], "graphnetearlystopping (class in graphnet.training.callbacks)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping"]], "piecewiselinearlr (class in graphnet.training.callbacks)": [[80, "graphnet.training.callbacks.PiecewiseLinearLR"]], "progressbar (class in graphnet.training.callbacks)": [[80, "graphnet.training.callbacks.ProgressBar"]], "get_lr() (graphnet.training.callbacks.piecewiselinearlr method)": [[80, "graphnet.training.callbacks.PiecewiseLinearLR.get_lr"]], "get_metrics() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.get_metrics"]], "graphnet.training.callbacks": [[80, "module-graphnet.training.callbacks"]], "init_predict_tqdm() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.init_predict_tqdm"]], "init_test_tqdm() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.init_test_tqdm"]], "init_train_tqdm() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.init_train_tqdm"]], "init_validation_tqdm() (graphnet.training.callbacks.progressbar method)": [[80, 
"graphnet.training.callbacks.ProgressBar.init_validation_tqdm"]], "on_fit_end() (graphnet.training.callbacks.graphnetearlystopping method)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping.on_fit_end"]], "on_train_epoch_end() (graphnet.training.callbacks.graphnetearlystopping method)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping.on_train_epoch_end"]], "on_train_epoch_end() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.on_train_epoch_end"]], "on_train_epoch_start() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.on_train_epoch_start"]], "on_validation_end() (graphnet.training.callbacks.graphnetearlystopping method)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping.on_validation_end"]], "setup() (graphnet.training.callbacks.graphnetearlystopping method)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping.setup"]], "direction (class in graphnet.training.labels)": [[81, "graphnet.training.labels.Direction"]], "label (class in graphnet.training.labels)": [[81, "graphnet.training.labels.Label"]], "graphnet.training.labels": [[81, "module-graphnet.training.labels"]], "key (graphnet.training.labels.label property)": [[81, "graphnet.training.labels.Label.key"]], "binarycrossentropyloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.BinaryCrossEntropyLoss"]], "crossentropyloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.CrossEntropyLoss"]], "euclideandistanceloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.EuclideanDistanceLoss"]], "logcmk (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.LogCMK"]], "logcoshloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.LogCoshLoss"]], "lossfunction (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.LossFunction"]], "mseloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.MSELoss"]], "rmseloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.RMSELoss"]], "vonmisesfisher2dloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.VonMisesFisher2DLoss"]], "vonmisesfisher3dloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.VonMisesFisher3DLoss"]], "vonmisesfisherloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.VonMisesFisherLoss"]], "backward() (graphnet.training.loss_functions.logcmk static method)": [[82, "graphnet.training.loss_functions.LogCMK.backward"]], "forward() (graphnet.training.loss_functions.logcmk static method)": [[82, "graphnet.training.loss_functions.LogCMK.forward"]], "forward() (graphnet.training.loss_functions.lossfunction method)": [[82, "graphnet.training.loss_functions.LossFunction.forward"]], "graphnet.training.loss_functions": [[82, "module-graphnet.training.loss_functions"]], "log_cmk() (graphnet.training.loss_functions.vonmisesfisherloss class method)": [[82, "graphnet.training.loss_functions.VonMisesFisherLoss.log_cmk"]], "log_cmk_approx() (graphnet.training.loss_functions.vonmisesfisherloss class method)": [[82, "graphnet.training.loss_functions.VonMisesFisherLoss.log_cmk_approx"]], "log_cmk_exact() (graphnet.training.loss_functions.vonmisesfisherloss class method)": [[82, 
"graphnet.training.loss_functions.VonMisesFisherLoss.log_cmk_exact"]], "collate_fn() (in module graphnet.training.utils)": [[83, "graphnet.training.utils.collate_fn"]], "collator_sequence_buckleting (class in graphnet.training.utils)": [[83, "graphnet.training.utils.collator_sequence_buckleting"]], "get_predictions() (in module graphnet.training.utils)": [[83, "graphnet.training.utils.get_predictions"]], "graphnet.training.utils": [[83, "module-graphnet.training.utils"]], "make_dataloader() (in module graphnet.training.utils)": [[83, "graphnet.training.utils.make_dataloader"]], "make_train_validation_dataloader() (in module graphnet.training.utils)": [[83, "graphnet.training.utils.make_train_validation_dataloader"]], "save_results() (in module graphnet.training.utils)": [[83, "graphnet.training.utils.save_results"]], "bjoernlow (class in graphnet.training.weight_fitting)": [[84, "graphnet.training.weight_fitting.BjoernLow"]], "uniform (class in graphnet.training.weight_fitting)": [[84, "graphnet.training.weight_fitting.Uniform"]], "weightfitter (class in graphnet.training.weight_fitting)": [[84, "graphnet.training.weight_fitting.WeightFitter"]], "fit() (graphnet.training.weight_fitting.weightfitter method)": [[84, "graphnet.training.weight_fitting.WeightFitter.fit"]], "graphnet.training.weight_fitting": [[84, "module-graphnet.training.weight_fitting"]], "graphnet.utilities": [[85, "module-graphnet.utilities"]], "argumentparser (class in graphnet.utilities.argparse)": [[86, "graphnet.utilities.argparse.ArgumentParser"]], "options (class in graphnet.utilities.argparse)": [[86, "graphnet.utilities.argparse.Options"]], "contains() (graphnet.utilities.argparse.options method)": [[86, "graphnet.utilities.argparse.Options.contains"]], "graphnet.utilities.argparse": [[86, "module-graphnet.utilities.argparse"]], "pop_default() (graphnet.utilities.argparse.options method)": [[86, "graphnet.utilities.argparse.Options.pop_default"]], "standard_arguments (graphnet.utilities.argparse.argumentparser attribute)": [[86, "graphnet.utilities.argparse.ArgumentParser.standard_arguments"]], "with_standard_arguments() (graphnet.utilities.argparse.argumentparser method)": [[86, "graphnet.utilities.argparse.ArgumentParser.with_standard_arguments"]], "graphnet.utilities.config": [[87, "module-graphnet.utilities.config"]], "baseconfig (class in graphnet.utilities.config.base_config)": [[88, "graphnet.utilities.config.base_config.BaseConfig"]], "as_dict() (graphnet.utilities.config.base_config.baseconfig method)": [[88, "graphnet.utilities.config.base_config.BaseConfig.as_dict"]], "dump() (graphnet.utilities.config.base_config.baseconfig method)": [[88, "graphnet.utilities.config.base_config.BaseConfig.dump"]], "get_all_argument_values() (in module graphnet.utilities.config.base_config)": [[88, "graphnet.utilities.config.base_config.get_all_argument_values"]], "graphnet.utilities.config.base_config": [[88, "module-graphnet.utilities.config.base_config"]], "load() (graphnet.utilities.config.base_config.baseconfig class method)": [[88, "graphnet.utilities.config.base_config.BaseConfig.load"]], "model_config (graphnet.utilities.config.base_config.baseconfig attribute)": [[88, "graphnet.utilities.config.base_config.BaseConfig.model_config"]], "model_fields (graphnet.utilities.config.base_config.baseconfig attribute)": [[88, "graphnet.utilities.config.base_config.BaseConfig.model_fields"]], "configurable (class in graphnet.utilities.config.configurable)": [[89, "graphnet.utilities.config.configurable.Configurable"]], 
"config (graphnet.utilities.config.configurable.configurable property)": [[89, "graphnet.utilities.config.configurable.Configurable.config"]], "from_config() (graphnet.utilities.config.configurable.configurable class method)": [[89, "graphnet.utilities.config.configurable.Configurable.from_config"]], "graphnet.utilities.config.configurable": [[89, "module-graphnet.utilities.config.configurable"]], "save_config() (graphnet.utilities.config.configurable.configurable method)": [[89, "graphnet.utilities.config.configurable.Configurable.save_config"]], "datasetconfig (class in graphnet.utilities.config.dataset_config)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig"]], "datasetconfigsaverabcmeta (class in graphnet.utilities.config.dataset_config)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfigSaverABCMeta"]], "datasetconfigsavermeta (class in graphnet.utilities.config.dataset_config)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfigSaverMeta"]], "as_dict() (graphnet.utilities.config.dataset_config.datasetconfig method)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.as_dict"]], "features (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.features"]], "graph_definition (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.graph_definition"]], "graphnet.utilities.config.dataset_config": [[90, "module-graphnet.utilities.config.dataset_config"]], "index_column (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.index_column"]], "loss_weight_column (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_column"]], "loss_weight_default_value (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_default_value"]], "loss_weight_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_table"]], "model_config (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.model_config"]], "model_fields (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.model_fields"]], "node_truth (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.node_truth"]], "node_truth_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.node_truth_table"]], "path (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.path"]], "pulsemaps (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.pulsemaps"]], "save_dataset_config() (in module graphnet.utilities.config.dataset_config)": [[90, "graphnet.utilities.config.dataset_config.save_dataset_config"]], "seed (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.seed"]], "selection 
(graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.selection"]], "string_selection (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.string_selection"]], "truth (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.truth"]], "truth_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.truth_table"]], "modelconfig (class in graphnet.utilities.config.model_config)": [[91, "graphnet.utilities.config.model_config.ModelConfig"]], "modelconfigsaverabc (class in graphnet.utilities.config.model_config)": [[91, "graphnet.utilities.config.model_config.ModelConfigSaverABC"]], "modelconfigsavermeta (class in graphnet.utilities.config.model_config)": [[91, "graphnet.utilities.config.model_config.ModelConfigSaverMeta"]], "arguments (graphnet.utilities.config.model_config.modelconfig attribute)": [[91, "graphnet.utilities.config.model_config.ModelConfig.arguments"]], "as_dict() (graphnet.utilities.config.model_config.modelconfig method)": [[91, "graphnet.utilities.config.model_config.ModelConfig.as_dict"]], "class_name (graphnet.utilities.config.model_config.modelconfig attribute)": [[91, "graphnet.utilities.config.model_config.ModelConfig.class_name"]], "graphnet.utilities.config.model_config": [[91, "module-graphnet.utilities.config.model_config"]], "model_config (graphnet.utilities.config.model_config.modelconfig attribute)": [[91, "graphnet.utilities.config.model_config.ModelConfig.model_config"]], "model_fields (graphnet.utilities.config.model_config.modelconfig attribute)": [[91, "graphnet.utilities.config.model_config.ModelConfig.model_fields"]], "save_model_config() (in module graphnet.utilities.config.model_config)": [[91, "graphnet.utilities.config.model_config.save_model_config"]], "get_all_grapnet_classes() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.get_all_grapnet_classes"]], "get_graphnet_classes() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.get_graphnet_classes"]], "graphnet.utilities.config.parsing": [[92, "module-graphnet.utilities.config.parsing"]], "is_graphnet_class() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.is_graphnet_class"]], "is_graphnet_module() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.is_graphnet_module"]], "list_all_submodules() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.list_all_submodules"]], "traverse_and_apply() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.traverse_and_apply"]], "trainingconfig (class in graphnet.utilities.config.training_config)": [[93, "graphnet.utilities.config.training_config.TrainingConfig"]], "dataloader (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.dataloader"]], "early_stopping_patience (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.early_stopping_patience"]], "fit (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, 
"graphnet.utilities.config.training_config.TrainingConfig.fit"]], "graphnet.utilities.config.training_config": [[93, "module-graphnet.utilities.config.training_config"]], "model_config (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.model_config"]], "model_fields (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.model_fields"]], "target (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.target"]], "graphnet.utilities.decorators": [[94, "module-graphnet.utilities.decorators"]], "graphnet.utilities.deprecation_tools": [[95, "module-graphnet.utilities.deprecation_tools"]], "rename_state_dict_entries() (in module graphnet.utilities.deprecation_tools)": [[95, "graphnet.utilities.deprecation_tools.rename_state_dict_entries"]], "find_i3_files() (in module graphnet.utilities.filesys)": [[96, "graphnet.utilities.filesys.find_i3_files"]], "graphnet.utilities.filesys": [[96, "module-graphnet.utilities.filesys"]], "has_extension() (in module graphnet.utilities.filesys)": [[96, "graphnet.utilities.filesys.has_extension"]], "is_gcd_file() (in module graphnet.utilities.filesys)": [[96, "graphnet.utilities.filesys.is_gcd_file"]], "is_i3_file() (in module graphnet.utilities.filesys)": [[96, "graphnet.utilities.filesys.is_i3_file"]], "graphnet.utilities.imports": [[97, "module-graphnet.utilities.imports"]], "has_icecube_package() (in module graphnet.utilities.imports)": [[97, "graphnet.utilities.imports.has_icecube_package"]], "has_pisa_package() (in module graphnet.utilities.imports)": [[97, "graphnet.utilities.imports.has_pisa_package"]], "has_torch_package() (in module graphnet.utilities.imports)": [[97, "graphnet.utilities.imports.has_torch_package"]], "requires_icecube() (in module graphnet.utilities.imports)": [[97, "graphnet.utilities.imports.requires_icecube"]], "logger (class in graphnet.utilities.logging)": [[98, "graphnet.utilities.logging.Logger"]], "repeatfilter (class in graphnet.utilities.logging)": [[98, "graphnet.utilities.logging.RepeatFilter"]], "critical() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.critical"]], "debug() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.debug"]], "error() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.error"]], "file_handlers (graphnet.utilities.logging.logger property)": [[98, "graphnet.utilities.logging.Logger.file_handlers"]], "filter() (graphnet.utilities.logging.repeatfilter method)": [[98, "graphnet.utilities.logging.RepeatFilter.filter"]], "graphnet.utilities.logging": [[98, "module-graphnet.utilities.logging"]], "handlers (graphnet.utilities.logging.logger property)": [[98, "graphnet.utilities.logging.Logger.handlers"]], "info() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.info"]], "nb_repeats_allowed (graphnet.utilities.logging.repeatfilter attribute)": [[98, "graphnet.utilities.logging.RepeatFilter.nb_repeats_allowed"]], "setlevel() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.setLevel"]], "stream_handlers (graphnet.utilities.logging.logger property)": [[98, "graphnet.utilities.logging.Logger.stream_handlers"]], "warning() (graphnet.utilities.logging.logger method)": [[98, 
"graphnet.utilities.logging.Logger.warning"]], "warning_once() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.warning_once"]], "eps_like() (in module graphnet.utilities.maths)": [[99, "graphnet.utilities.maths.eps_like"]], "graphnet.utilities.maths": [[99, "module-graphnet.utilities.maths"]]}}) \ No newline at end of file +Search.setIndex({"docnames": ["about", "api/graphnet", "api/graphnet.constants", "api/graphnet.data", "api/graphnet.data.constants", "api/graphnet.data.dataconverter", "api/graphnet.data.dataloader", "api/graphnet.data.dataset", "api/graphnet.data.dataset.dataset", "api/graphnet.data.dataset.parquet", "api/graphnet.data.dataset.parquet.parquet_dataset", "api/graphnet.data.dataset.sqlite", "api/graphnet.data.dataset.sqlite.sqlite_dataset", "api/graphnet.data.extractors", "api/graphnet.data.extractors.i3extractor", "api/graphnet.data.extractors.i3featureextractor", "api/graphnet.data.extractors.i3genericextractor", "api/graphnet.data.extractors.i3hybridrecoextractor", "api/graphnet.data.extractors.i3ntmuonlabelsextractor", "api/graphnet.data.extractors.i3particleextractor", "api/graphnet.data.extractors.i3pisaextractor", "api/graphnet.data.extractors.i3quesoextractor", "api/graphnet.data.extractors.i3retroextractor", "api/graphnet.data.extractors.i3splinempeextractor", "api/graphnet.data.extractors.i3truthextractor", "api/graphnet.data.extractors.i3tumextractor", "api/graphnet.data.extractors.utilities", "api/graphnet.data.extractors.utilities.collections", "api/graphnet.data.extractors.utilities.frames", "api/graphnet.data.extractors.utilities.types", "api/graphnet.data.filters", "api/graphnet.data.parquet", "api/graphnet.data.parquet.parquet_dataconverter", "api/graphnet.data.pipeline", "api/graphnet.data.sqlite", "api/graphnet.data.sqlite.sqlite_dataconverter", "api/graphnet.data.sqlite.sqlite_utilities", "api/graphnet.data.utilities", "api/graphnet.data.utilities.parquet_to_sqlite", "api/graphnet.data.utilities.random", "api/graphnet.data.utilities.string_selection_resolver", "api/graphnet.deployment", "api/graphnet.deployment.i3modules", "api/graphnet.deployment.i3modules.deployer", "api/graphnet.deployment.i3modules.graphnet_module", "api/graphnet.models", "api/graphnet.models.coarsening", "api/graphnet.models.components", "api/graphnet.models.components.layers", "api/graphnet.models.components.pool", "api/graphnet.models.detector", "api/graphnet.models.detector.detector", "api/graphnet.models.detector.icecube", "api/graphnet.models.detector.prometheus", "api/graphnet.models.gnn", "api/graphnet.models.gnn.convnet", "api/graphnet.models.gnn.dynedge", "api/graphnet.models.gnn.dynedge_jinst", "api/graphnet.models.gnn.dynedge_kaggle_tito", "api/graphnet.models.gnn.gnn", "api/graphnet.models.graphs", "api/graphnet.models.graphs.edges", "api/graphnet.models.graphs.edges.edges", "api/graphnet.models.graphs.edges.minkowski", "api/graphnet.models.graphs.graph_definition", "api/graphnet.models.graphs.graphs", "api/graphnet.models.graphs.nodes", "api/graphnet.models.graphs.nodes.nodes", "api/graphnet.models.graphs.utils", "api/graphnet.models.model", "api/graphnet.models.standard_model", "api/graphnet.models.task", "api/graphnet.models.task.classification", "api/graphnet.models.task.reconstruction", "api/graphnet.models.task.task", "api/graphnet.models.utils", "api/graphnet.pisa", "api/graphnet.pisa.fitting", "api/graphnet.pisa.plotting", "api/graphnet.training", "api/graphnet.training.callbacks", "api/graphnet.training.labels", 
"api/graphnet.training.loss_functions", "api/graphnet.training.utils", "api/graphnet.training.weight_fitting", "api/graphnet.utilities", "api/graphnet.utilities.argparse", "api/graphnet.utilities.config", "api/graphnet.utilities.config.base_config", "api/graphnet.utilities.config.configurable", "api/graphnet.utilities.config.dataset_config", "api/graphnet.utilities.config.model_config", "api/graphnet.utilities.config.parsing", "api/graphnet.utilities.config.training_config", "api/graphnet.utilities.decorators", "api/graphnet.utilities.deprecation_tools", "api/graphnet.utilities.filesys", "api/graphnet.utilities.imports", "api/graphnet.utilities.logging", "api/graphnet.utilities.maths", "api/modules", "contribute", "index", "install"], "filenames": ["about.md", "api/graphnet.rst", "api/graphnet.constants.rst", "api/graphnet.data.rst", "api/graphnet.data.constants.rst", "api/graphnet.data.dataconverter.rst", "api/graphnet.data.dataloader.rst", "api/graphnet.data.dataset.rst", "api/graphnet.data.dataset.dataset.rst", "api/graphnet.data.dataset.parquet.rst", "api/graphnet.data.dataset.parquet.parquet_dataset.rst", "api/graphnet.data.dataset.sqlite.rst", "api/graphnet.data.dataset.sqlite.sqlite_dataset.rst", "api/graphnet.data.extractors.rst", "api/graphnet.data.extractors.i3extractor.rst", "api/graphnet.data.extractors.i3featureextractor.rst", "api/graphnet.data.extractors.i3genericextractor.rst", "api/graphnet.data.extractors.i3hybridrecoextractor.rst", "api/graphnet.data.extractors.i3ntmuonlabelsextractor.rst", "api/graphnet.data.extractors.i3particleextractor.rst", "api/graphnet.data.extractors.i3pisaextractor.rst", "api/graphnet.data.extractors.i3quesoextractor.rst", "api/graphnet.data.extractors.i3retroextractor.rst", "api/graphnet.data.extractors.i3splinempeextractor.rst", "api/graphnet.data.extractors.i3truthextractor.rst", "api/graphnet.data.extractors.i3tumextractor.rst", "api/graphnet.data.extractors.utilities.rst", "api/graphnet.data.extractors.utilities.collections.rst", "api/graphnet.data.extractors.utilities.frames.rst", "api/graphnet.data.extractors.utilities.types.rst", "api/graphnet.data.filters.rst", "api/graphnet.data.parquet.rst", "api/graphnet.data.parquet.parquet_dataconverter.rst", "api/graphnet.data.pipeline.rst", "api/graphnet.data.sqlite.rst", "api/graphnet.data.sqlite.sqlite_dataconverter.rst", "api/graphnet.data.sqlite.sqlite_utilities.rst", "api/graphnet.data.utilities.rst", "api/graphnet.data.utilities.parquet_to_sqlite.rst", "api/graphnet.data.utilities.random.rst", "api/graphnet.data.utilities.string_selection_resolver.rst", "api/graphnet.deployment.rst", "api/graphnet.deployment.i3modules.rst", "api/graphnet.deployment.i3modules.deployer.rst", "api/graphnet.deployment.i3modules.graphnet_module.rst", "api/graphnet.models.rst", "api/graphnet.models.coarsening.rst", "api/graphnet.models.components.rst", "api/graphnet.models.components.layers.rst", "api/graphnet.models.components.pool.rst", "api/graphnet.models.detector.rst", "api/graphnet.models.detector.detector.rst", "api/graphnet.models.detector.icecube.rst", "api/graphnet.models.detector.prometheus.rst", "api/graphnet.models.gnn.rst", "api/graphnet.models.gnn.convnet.rst", "api/graphnet.models.gnn.dynedge.rst", "api/graphnet.models.gnn.dynedge_jinst.rst", "api/graphnet.models.gnn.dynedge_kaggle_tito.rst", "api/graphnet.models.gnn.gnn.rst", "api/graphnet.models.graphs.rst", "api/graphnet.models.graphs.edges.rst", "api/graphnet.models.graphs.edges.edges.rst", "api/graphnet.models.graphs.edges.minkowski.rst", 
"api/graphnet.models.graphs.graph_definition.rst", "api/graphnet.models.graphs.graphs.rst", "api/graphnet.models.graphs.nodes.rst", "api/graphnet.models.graphs.nodes.nodes.rst", "api/graphnet.models.graphs.utils.rst", "api/graphnet.models.model.rst", "api/graphnet.models.standard_model.rst", "api/graphnet.models.task.rst", "api/graphnet.models.task.classification.rst", "api/graphnet.models.task.reconstruction.rst", "api/graphnet.models.task.task.rst", "api/graphnet.models.utils.rst", "api/graphnet.pisa.rst", "api/graphnet.pisa.fitting.rst", "api/graphnet.pisa.plotting.rst", "api/graphnet.training.rst", "api/graphnet.training.callbacks.rst", "api/graphnet.training.labels.rst", "api/graphnet.training.loss_functions.rst", "api/graphnet.training.utils.rst", "api/graphnet.training.weight_fitting.rst", "api/graphnet.utilities.rst", "api/graphnet.utilities.argparse.rst", "api/graphnet.utilities.config.rst", "api/graphnet.utilities.config.base_config.rst", "api/graphnet.utilities.config.configurable.rst", "api/graphnet.utilities.config.dataset_config.rst", "api/graphnet.utilities.config.model_config.rst", "api/graphnet.utilities.config.parsing.rst", "api/graphnet.utilities.config.training_config.rst", "api/graphnet.utilities.decorators.rst", "api/graphnet.utilities.deprecation_tools.rst", "api/graphnet.utilities.filesys.rst", "api/graphnet.utilities.imports.rst", "api/graphnet.utilities.logging.rst", "api/graphnet.utilities.maths.rst", "api/modules.rst", "contribute.md", "index.rst", "install.md"], "titles": ["About", "API", "constants", "data", "constants", "dataconverter", "dataloader", "dataset", "dataset", "parquet", "parquet_dataset", "sqlite", "sqlite_dataset", "extractors", "i3extractor", "i3featureextractor", "i3genericextractor", "i3hybridrecoextractor", "i3ntmuonlabelsextractor", "i3particleextractor", "i3pisaextractor", "i3quesoextractor", "i3retroextractor", "i3splinempeextractor", "i3truthextractor", "i3tumextractor", "utilities", "collections", "frames", "types", "filters", "parquet", "parquet_dataconverter", "pipeline", "sqlite", "sqlite_dataconverter", "sqlite_utilities", "utilities", "parquet_to_sqlite", "random", "string_selection_resolver", "deployment", "i3modules", "deployer", "graphnet_module", "models", "coarsening", "components", "layers", "pool", "detector", "detector", "icecube", "prometheus", "gnn", "convnet", "dynedge", "dynedge_jinst", "dynedge_kaggle_tito", "gnn", "graphs", "edges", "edges", "minkowski", "graph_definition", "graphs", "nodes", "nodes", "utils", "model", "standard_model", "task", "classification", "reconstruction", "task", "utils", "pisa", "fitting", "plotting", "training", "callbacks", "labels", "loss_functions", "utils", "weight_fitting", "utilities", "argparse", "config", "base_config", "configurable", "dataset_config", "model_config", "parsing", "training_config", "decorators", "deprecation_tools", "filesys", "imports", "logging", "maths", "src", "Contribute", "About", "Install"], "terms": {"graphnet": [0, 1, 2, 3, 4, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32, 35, 36, 37, 38, 39, 40, 41, 51, 52, 53, 69, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 95, 96, 97, 98, 99, 101, 102, 103], "i": [0, 1, 14, 16, 27, 28, 29, 30, 35, 36, 39, 40, 78, 80, 81, 82, 84, 86, 88, 91, 92, 93, 95, 96, 97, 98, 101, 102, 103], "an": [0, 5, 29, 32, 35, 40, 82, 96, 98, 101, 102, 103], "open": [0, 101, 102], "sourc": [0, 4, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 32, 35, 36, 38, 39, 40, 51, 52, 53, 
69, 77, 78, 80, 81, 82, 84, 86, 88, 89, 90, 91, 92, 93, 95, 96, 97, 98, 99, 101, 102], "python": [0, 1, 5, 13, 14, 16, 27, 29, 101, 102, 103], "framework": [0, 102], "aim": [0, 1, 101, 102], "provid": [0, 1, 82, 101, 102, 103], "high": [0, 102], "qualiti": [0, 102], "user": [0, 80, 102, 103], "friendli": [0, 102], "end": [0, 1, 5, 32, 35, 80, 102], "function": [0, 5, 29, 36, 39, 52, 53, 69, 77, 78, 82, 85, 90, 91, 92, 95, 96, 97, 99, 102], "perform": [0, 102], "reconstruct": [0, 1, 15, 17, 18, 22, 23, 25, 41, 45, 71, 102], "task": [0, 1, 45, 82, 101, 102], "neutrino": [0, 1, 77, 102], "telescop": [0, 1, 102], "us": [0, 1, 2, 4, 5, 14, 19, 24, 26, 27, 32, 35, 36, 37, 38, 40, 41, 51, 69, 77, 80, 81, 82, 84, 85, 86, 87, 90, 91, 92, 97, 98, 101, 102, 103], "graph": [0, 1, 45, 51, 81, 101, 102], "neural": [0, 1, 102], "network": [0, 1, 102], "gnn": [0, 1, 45, 102, 103], "make": [0, 5, 84, 90, 91, 101, 102, 103], "fast": [0, 102, 103], "easi": [0, 102], "train": [0, 1, 40, 41, 80, 81, 82, 84, 86, 90, 91, 93, 100, 102, 103], "complex": [0, 102], "model": [0, 1, 41, 51, 52, 53, 78, 79, 80, 82, 86, 88, 90, 91, 93, 100, 102, 103], "can": [0, 1, 14, 16, 19, 38, 77, 78, 84, 86, 88, 90, 91, 101, 102, 103], "event": [0, 1, 21, 36, 38, 40, 77, 82, 84, 90, 102], "state": [0, 95, 102], "art": [0, 102], "arbitrari": [0, 102], "detector": [0, 1, 24, 45, 52, 53, 102], "configur": [0, 1, 69, 77, 85, 87, 88, 90, 91, 93, 98, 102], "infer": [0, 1, 41, 102, 103], "time": [0, 4, 36, 98, 102, 103], "ar": [0, 1, 4, 5, 16, 29, 30, 32, 35, 38, 40, 77, 82, 84, 90, 91, 101, 102, 103], "order": [0, 27, 102], "magnitud": [0, 102], "faster": [0, 102], "than": [0, 98, 102], "tradit": [0, 102], "techniqu": [0, 102], "common": [0, 1, 82, 90, 91, 94, 97, 102], "ml": [0, 1, 102], "develop": [0, 1, 101, 102, 103], "physicist": [0, 1, 102], "wish": [0, 101, 102], "tool": [0, 1, 102], "research": [0, 102], "By": [0, 38, 102], "unit": [0, 5, 97, 101, 102], "both": [0, 16, 78, 102], "group": [0, 5, 32, 35, 102], "increas": [0, 80, 102], "longev": [0, 102], "usabl": [0, 102], "individu": [0, 5, 102], "code": [0, 24, 36, 90, 91, 102], "contribut": [0, 102, 103], "from": [0, 1, 13, 14, 16, 18, 19, 21, 27, 28, 29, 30, 35, 38, 69, 78, 80, 81, 82, 88, 89, 90, 91, 93, 98, 101, 102, 103], "build": [0, 1, 51, 69, 88, 90, 91, 102], "gener": [0, 5, 16, 30, 82, 102], "reusabl": [0, 102], "softwar": [0, 82, 102], "packag": [0, 1, 39, 92, 96, 97, 101, 102, 103], "base": [0, 4, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 30, 32, 35, 38, 40, 51, 52, 53, 69, 77, 80, 81, 82, 84, 86, 88, 89, 90, 91, 93, 97, 98, 102], "engin": [0, 102], "best": [0, 80, 101, 102], "practic": [0, 101, 102], "lower": [0, 78, 102], "technic": [0, 102], "threshold": [0, 102], "most": [0, 1, 40, 102, 103], "scientif": [0, 1, 102], "problem": [0, 101, 102], "The": [0, 5, 27, 29, 35, 36, 77, 78, 80, 81, 82, 95, 102], "improv": [0, 1, 86, 102], "classif": [0, 1, 45, 71, 82, 102], "yield": [0, 77, 82, 102], "veri": [0, 40, 102], "accur": [0, 102], "e": [0, 1, 5, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 27, 29, 32, 35, 36, 40, 51, 52, 53, 69, 80, 81, 82, 84, 88, 98, 101, 102, 103], "g": [0, 1, 5, 24, 27, 29, 32, 35, 36, 40, 84, 98, 101, 102, 103], "low": [0, 102], "energi": [0, 4, 84, 102], "observ": [0, 102], "icecub": [0, 1, 15, 28, 29, 45, 50, 97, 102, 103], "here": [0, 101, 102, 103], "implement": [0, 1, 5, 14, 31, 32, 34, 35, 82, 101, 102], "wa": [0, 102], "appli": [0, 14, 92, 102], "oscil": [0, 76, 102], "lead": [0, 102], "signific": [0, 102], "angular": [0, 102], 
"rang": [0, 102], "relev": [0, 1, 29, 39, 96, 101, 102], "studi": [0, 102], "furthermor": [0, 102], "shown": [0, 102], "could": [0, 101, 102], "muon": [0, 18, 102], "v": [0, 102], "therebi": [0, 1, 90, 91, 102], "effici": [0, 102], "puriti": [0, 102], "sampl": [0, 40, 102], "analysi": [0, 102, 103], "similarli": [0, 29, 102], "ha": [0, 5, 29, 32, 35, 36, 82, 96, 102, 103], "great": [0, 102], "point": [0, 23, 81, 82, 102], "analys": [0, 41, 76, 102], "final": [0, 80, 90, 102], "millisecond": [0, 102], "allow": [0, 41, 80, 88, 93, 102, 103], "whole": [0, 102], "new": [0, 1, 35, 88, 93, 101, 102], "type": [0, 5, 13, 14, 26, 27, 28, 32, 35, 36, 38, 39, 40, 51, 52, 53, 69, 77, 78, 80, 82, 84, 86, 88, 89, 90, 91, 92, 95, 96, 97, 98, 99, 101, 102], "cosmic": [0, 102], "alert": [0, 102], "which": [0, 14, 15, 24, 28, 40, 69, 77, 82, 86, 102, 103], "were": [0, 102], "previous": [0, 102], "unfeas": [0, 102], "possibl": [0, 27, 101, 102], "identifi": [0, 5, 24, 90, 91, 102], "10": [0, 52, 53, 86, 102], "tev": [0, 102], "monitor": [0, 102], "rate": [0, 80, 102], "direct": [0, 79, 81, 102], "real": [0, 102], "thi": [0, 3, 5, 14, 16, 29, 32, 35, 36, 39, 77, 78, 80, 82, 84, 88, 90, 91, 93, 98, 101, 102, 103], "enabl": [0, 3, 102], "first": [0, 80, 101, 102], "ever": [0, 102], "despit": [0, 102], "larg": [0, 82, 102], "background": [0, 102], "origin": [0, 77, 102], "compris": [0, 102], "number": [0, 5, 32, 35, 40, 80, 86, 102], "modul": [0, 3, 29, 41, 50, 69, 76, 79, 85, 87, 90, 91, 92, 93, 97, 102], "necessari": [0, 27, 101, 102], "workflow": [0, 102], "ingest": [0, 1, 3, 50, 102], "raw": [0, 102], "data": [0, 1, 4, 5, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 50, 51, 52, 53, 81, 86, 88, 90, 93, 97, 100, 102, 103], "domain": [0, 1, 3, 41, 102], "specif": [0, 1, 3, 5, 15, 29, 31, 32, 34, 35, 36, 41, 50, 51, 52, 53, 82, 101, 102, 103], "format": [0, 1, 3, 5, 27, 32, 35, 78, 90, 101, 102, 103], "deploi": [0, 1, 41, 102], "chain": [0, 1, 41, 102, 103], "illustr": [0, 101, 102], "figur": [0, 78, 102], "level": [0, 24, 30, 36, 98, 102, 103], "overview": [0, 102], "typic": [0, 27, 102], "convert": [0, 1, 3, 5, 27, 30, 32, 35, 38, 102, 103], "industri": [0, 3, 102], "standard": [0, 3, 4, 5, 30, 32, 35, 40, 52, 53, 86, 101, 102], "intermedi": [0, 1, 3, 5, 32, 35, 102, 103], "file": [0, 1, 3, 5, 14, 27, 30, 32, 35, 38, 39, 69, 77, 80, 82, 86, 87, 88, 89, 90, 91, 96, 98, 102, 103], "read": [0, 3, 27, 51, 102, 103], "simpl": [0, 102], "physic": [0, 1, 14, 28, 29, 41, 102], "orient": [0, 102], "compon": [0, 1, 45, 69, 102], "manag": [0, 14, 79, 102], "experi": [0, 1, 79, 102], "log": [0, 1, 79, 80, 82, 85, 102, 103], "deploy": [0, 1, 42, 100, 102], "modular": [0, 102], "subclass": [0, 102], "torch": [0, 69, 97, 102, 103], "nn": [0, 102], "mean": [0, 5, 32, 35, 82, 91, 102], "onli": [0, 1, 77, 84, 88, 91, 93, 97, 102, 103], "need": [0, 27, 69, 82, 95, 102, 103], "import": [0, 1, 36, 85, 102], "few": [0, 101, 102], "exist": [0, 35, 36, 81, 90, 102], "purpos": [0, 82, 102], "built": [0, 102], "them": [0, 1, 27, 77, 102, 103], "togeth": [0, 102], "form": [0, 88, 93, 102], "complet": [0, 102], "extend": [0, 1, 102], "suit": [0, 102], "through": [0, 82, 102], "layer": [0, 45, 47, 102], "connect": [0, 82, 102], "etc": [0, 82, 98, 102], "optimis": [0, 1, 102], "differ": [0, 14, 101, 102, 103], "track": [0, 14, 18, 101, 102], "These": [0, 101, 102], "prepar": [0, 82, 102], "satisfi": [0, 102], "o": [0, 102], "load": [0, 39, 69, 88, 90, 102], "requir": [0, 20, 
36, 82, 90, 91, 93, 102, 103], "when": [0, 5, 27, 30, 32, 35, 36, 81, 98, 101, 102, 103], "batch": [0, 86, 102], "do": [0, 82, 90, 91, 101, 102, 103], "predict": [0, 19, 23, 25, 82, 102], "either": [0, 82, 102, 103], "contain": [0, 5, 27, 28, 32, 35, 69, 82, 84, 86, 102, 103], "imag": [0, 1, 101, 102, 103], "portabl": [0, 102], "depend": [0, 102, 103], "free": [0, 82, 102], "split": [0, 30, 102], "up": [0, 5, 32, 35, 101, 102, 103], "interfac": [0, 76, 90, 91, 102, 103], "block": [0, 1, 102], "pre": [0, 51, 81, 101, 102], "directli": [0, 14, 102], "while": [0, 16, 80, 102], "continu": [0, 82, 102], "expand": [0, 102], "": [0, 5, 14, 27, 35, 38, 51, 80, 84, 86, 90, 91, 98, 99, 102, 103], "capabl": [0, 102], "project": [0, 101, 102], "receiv": [0, 102], "fund": [0, 102], "european": [0, 102], "union": [0, 16, 27, 29, 90, 93, 96, 102], "horizon": [0, 102], "2020": [0, 102], "innov": [0, 102], "programm": [0, 102], "under": [0, 102], "mari": [0, 102], "sk\u0142odowska": [0, 102], "curi": [0, 102], "grant": [0, 82, 102], "agreement": [0, 101, 102], "No": [0, 102], "890778": [0, 102], "work": [0, 4, 28, 101, 102, 103], "rasmu": [0, 102], "\u00f8rs\u00f8e": [0, 102], "partli": [0, 102], "punch4nfdi": [0, 102], "consortium": [0, 102], "support": [0, 29, 101, 102, 103], "dfg": [0, 102], "nfdi": [0, 102], "39": [0, 102, 103], "1": [0, 5, 27, 32, 35, 40, 80, 82, 84, 90, 102, 103], "germani": [0, 102], "conveni": [1, 101, 103], "collabor": 1, "solv": [1, 101], "It": [1, 27, 36, 101], "leverag": 1, "advanc": 1, "machin": [1, 103], "learn": [1, 80, 103], "without": [1, 77, 82, 103], "have": [1, 5, 16, 32, 35, 36, 40, 101, 103], "expert": 1, "themselv": [1, 90, 91], "acceler": 1, "area": 1, "phyic": 1, "design": 1, "principl": 1, "all": [1, 5, 14, 16, 30, 32, 35, 36, 51, 69, 82, 88, 89, 90, 91, 92, 93, 98, 101, 103], "streamlin": 1, "process": [1, 5, 14, 51, 101, 103], "transform": [1, 84], "extens": [1, 96], "basic": 1, "across": [1, 2, 29, 37, 82, 85, 86, 87, 98], "variou": 1, "easili": 1, "architectur": 1, "main": [1, 101, 103], "featur": [1, 3, 4, 5, 15, 51, 90, 101], "i3": [1, 5, 14, 28, 29, 30, 32, 35, 39, 96, 103], "more": [1, 36, 39, 90, 91, 98], "index": [1, 5, 29, 36, 51, 80], "sqlite": [1, 3, 7, 35, 36, 38, 103], "suitabl": 1, "plug": 1, "plai": 1, "abstract": [1, 5, 51, 89], "awai": 1, "detail": [1, 80, 103], "expos": 1, "physicst": 1, "what": [1, 101], "i3modul": [1, 41], "includ": [1, 77, 82, 88, 101], "docker": 1, "run": [1, 38], "containeris": 1, "fashion": 1, "subpackag": [1, 3, 7, 13, 41, 45, 60, 85], "dataset": [1, 3, 18, 40, 86, 90], "extractor": [1, 3, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 32, 35], "parquet": [1, 3, 7, 32, 38, 52, 53, 103], "util": [1, 3, 13, 27, 28, 29, 36, 38, 39, 40, 45, 60, 79, 86, 88, 89, 90, 91, 92, 93, 95, 96, 97, 98, 99, 100], "constant": [1, 3, 100], "dataconvert": [1, 3, 32, 35], "dataload": [1, 3, 93], "filter": [1, 3, 98], "pipelin": [1, 3], "coarsen": [1, 45], "standard_model": [1, 45], "pisa": [1, 20, 77, 78, 97, 100, 103], "fit": [1, 76, 78, 82, 84, 93], "plot": [1, 76], "callback": [1, 79], "label": [1, 18, 21, 78, 79], "loss_funct": [1, 79], "weight_fit": [1, 79], "config": [1, 40, 77, 80, 82, 85, 86, 88, 89, 90, 91, 92, 93], "argpars": [1, 85], "decor": [1, 5, 85, 97], "deprecation_tool": [1, 85], "filesi": [1, 85], "math": [1, 85], "submodul": [1, 3, 7, 9, 11, 13, 26, 31, 34, 37, 42, 45, 47, 50, 54, 60, 61, 66, 71, 76, 79, 85, 87, 92], "global": [2, 4, 69], "i3extractor": [3, 5, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 
24, 25, 32, 35], "i3featureextractor": [3, 4, 13, 35], "i3genericextractor": [3, 13, 35], "i3hybridrecoextractor": [3, 13], "i3ntmuonlabelsextractor": [3, 13], "i3particleextractor": [3, 13], "i3pisaextractor": [3, 13], "i3quesoextractor": [3, 13], "i3retroextractor": [3, 13], "i3splinempeextractor": [3, 13], "i3truthextractor": [3, 4, 13], "i3tumextractor": [3, 13], "parquet_dataconvert": [3, 31], "sqlite_dataconvert": [3, 34], "sqlite_util": [3, 34], "parquet_to_sqlit": [3, 37], "random": [3, 37, 40, 90], "string_selection_resolv": [3, 37], "truth": [3, 4, 15, 24, 36, 84, 90], "fileset": [3, 5], "init_global_index": [3, 5], "cache_output_fil": [3, 5], "i3filt": [3, 5, 30, 32, 35], "nullspliti3filt": [3, 30], "i3filtermask": [3, 30], "class": [4, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 29, 30, 31, 32, 34, 35, 38, 40, 51, 52, 53, 69, 77, 80, 81, 82, 84, 86, 88, 89, 90, 91, 92, 93, 98, 101], "object": [4, 5, 14, 16, 27, 29, 51, 52, 53, 69, 77, 80, 82, 86, 88, 90, 91, 93, 98], "namespac": [4, 69, 90, 91], "name": [4, 5, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 30, 32, 35, 36, 38, 51, 77, 81, 84, 86, 88, 90, 91, 92, 93, 98, 101, 103], "icecube86": [4, 50, 52], "dom_x": [4, 52], "dom_i": [4, 52], "dom_z": [4, 52], "dom_tim": 4, "charg": [4, 82], "rde": 4, "pmt_area": 4, "deepcor": [4, 15, 52], "upgrad": [4, 15, 52, 103], "string": [4, 5, 27, 32, 35, 40, 51, 52, 88], "pmt_number": 4, "dom_numb": 4, "pmt_dir_x": 4, "pmt_dir_i": 4, "pmt_dir_z": 4, "dom_typ": 4, "prometheu": [4, 45, 50], "sensor_pos_x": [4, 53], "sensor_pos_i": [4, 53], "sensor_pos_z": [4, 53], "t": [4, 29, 36, 78, 80, 82, 103], "kaggl": [4, 52], "x": [4, 5, 24, 32, 35, 78, 82, 84], "y": [4, 24, 78, 103], "z": [4, 5, 24, 32, 35, 103], "auxiliari": 4, "energy_track": 4, "energy_cascad": 4, "position_x": 4, "position_i": 4, "position_z": 4, "azimuth": [4, 81], "zenith": [4, 81], "pid": [4, 40, 90], "elast": 4, "sim_typ": 4, "interaction_typ": 4, "interaction_tim": 4, "inelast": 4, "stopped_muon": 4, "injection_energi": 4, "injection_typ": 4, "injection_interaction_typ": 4, "injection_zenith": 4, "injection_azimuth": 4, "injection_bjorkenx": 4, "injection_bjorkeni": 4, "injection_position_x": 4, "injection_position_i": 4, "injection_position_z": 4, "injection_column_depth": 4, "primary_lepton_1_typ": 4, "primary_hadron_1_typ": 4, "primary_lepton_1_position_x": 4, "primary_lepton_1_position_i": 4, "primary_lepton_1_position_z": 4, "primary_hadron_1_position_x": 4, "primary_hadron_1_position_i": 4, "primary_hadron_1_position_z": 4, "primary_lepton_1_direction_theta": 4, "primary_lepton_1_direction_phi": 4, "primary_hadron_1_direction_theta": 4, "primary_hadron_1_direction_phi": 4, "primary_lepton_1_energi": 4, "primary_hadron_1_energi": 4, "total_energi": 4, "i3_fil": [5, 14], "str": [5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 32, 35, 36, 38, 39, 40, 51, 52, 53, 69, 77, 80, 81, 84, 86, 88, 89, 90, 91, 92, 93, 95, 96, 98], "gcd_file": [5, 14], "paramet": [5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 32, 35, 36, 38, 39, 40, 51, 52, 53, 69, 77, 78, 80, 81, 82, 84, 86, 88, 89, 90, 91, 92, 93, 95, 96, 97, 98, 99], "output_fil": [5, 32, 35], "global_index": 5, "avail": [5, 16, 97], "pool": [5, 45, 47], "worker": [5, 32, 35, 39, 86, 98], "return": [5, 14, 27, 28, 29, 32, 35, 36, 38, 39, 40, 51, 52, 53, 69, 77, 78, 80, 81, 82, 84, 86, 88, 89, 90, 91, 92, 95, 96, 97, 98, 99], "none": [5, 14, 16, 24, 28, 29, 30, 32, 35, 36, 38, 40, 69, 77, 80, 82, 84, 86, 88, 89, 90, 92, 96, 
98], "synchron": 5, "list": [5, 14, 16, 24, 27, 29, 30, 32, 35, 36, 38, 39, 40, 51, 69, 78, 80, 84, 90, 92, 93, 96, 98], "process_method": 5, "cach": 5, "output": [5, 32, 35, 38, 77, 84, 90, 91, 103], "typevar": 5, "f": 5, "bound": [5, 78], "callabl": [5, 29, 51, 52, 53, 84, 88, 90, 91, 92, 97], "ani": [5, 27, 28, 29, 30, 32, 35, 51, 52, 53, 69, 78, 80, 82, 84, 86, 88, 89, 90, 91, 92, 93, 98, 103], "outdir": [5, 32, 35, 38, 77], "gcd_rescu": [5, 32, 35, 96], "nb_files_to_batch": [5, 32, 35], "sequential_batch_pattern": [5, 32, 35], "input_file_batch_pattern": [5, 32, 35], "index_column": [5, 32, 35, 36, 40, 77, 84, 90], "icetray_verbos": [5, 32, 35], "i3_filt": [5, 32, 35], "abc": [5, 14, 69, 81, 84, 89, 90, 91], "logger": [5, 14, 30, 38, 40, 69, 81, 84, 85, 98, 103], "construct": [5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 30, 32, 35, 38, 40, 51, 52, 53, 69, 77, 80, 81, 82, 84, 86, 89, 90, 91, 98], "regular": [5, 29, 32, 35], "express": [5, 32, 35, 69, 82], "accord": [5, 32, 35], "match": [5, 32, 35, 84, 96, 99], "certain": [5, 32, 35, 38, 77], "pattern": [5, 32, 35], "wildcard": [5, 32, 35], "same": [5, 29, 32, 35, 36, 80, 92, 98], "input": [5, 32, 35, 52, 88, 93, 95], "replac": [5, 32, 35, 88, 90, 91, 93, 95], "period": [5, 32, 35], "special": [5, 16, 32, 35], "interpret": [5, 32, 35], "liter": [5, 32, 35], "charact": [5, 32, 35], "regex": [5, 32, 35], "For": [5, 29, 32, 35, 80], "instanc": [5, 14, 24, 29, 32, 35, 69, 77, 81, 89, 91, 103], "A": [5, 30, 32, 35, 77, 82, 84, 88, 90, 91, 93, 103], "_": [5, 32, 35], "0": [5, 32, 35, 40, 77, 78, 82, 90], "9": [5, 32, 35], "5": [5, 32, 35, 40, 86, 103], "zst": [5, 32, 35], "find": [5, 32, 35, 96], "whose": [5, 32, 35], "one": [5, 32, 35, 36, 90, 91, 96, 101, 103], "capit": [5, 32, 35], "letter": [5, 32, 35], "follow": [5, 32, 35, 82, 84, 101, 103], "underscor": [5, 32, 35], "five": [5, 32, 35], "upgrade_genie_step4_141020_a_000000": [5, 32, 35], "upgrade_genie_step4_141020_a_000001": [5, 32, 35], "upgrade_genie_step4_141020_a_000008": [5, 32, 35], "upgrade_genie_step4_141020_a_000009": [5, 32, 35], "would": [5, 32, 35, 101], "upgrade_genie_step4_141020_a_00000x": [5, 32, 35], "suffix": [5, 32, 35], "upgrade_genie_step4_141020_a_000010": [5, 32, 35], "separ": [5, 27, 32, 35, 80, 103], "upgrade_genie_step4_141020_a_00001x": [5, 32, 35], "int": [5, 18, 21, 30, 32, 35, 40, 77, 80, 82, 84, 86, 90, 93, 98], "properti": [5, 14, 19, 29, 51, 81, 89, 98], "file_suffix": [5, 32, 35], "execut": [5, 36], "method": [5, 14, 26, 27, 28, 29, 32, 35, 51, 82, 84], "set": [5, 16, 101], "inherit": [5, 14, 29, 51, 82, 98], "path": [5, 36, 39, 69, 77, 78, 80, 86, 88, 89, 90, 96, 103], "correspond": [5, 27, 29, 35, 39, 84, 88, 90, 91, 93, 96, 103], "gcd": [5, 14, 28, 39, 96], "save_data": [5, 32, 35], "save": [5, 14, 27, 32, 35, 36, 69, 77, 80, 82, 84, 88, 89, 90, 91, 103], "ordereddict": [5, 32, 35], "extract": [5, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 28, 35, 38, 39], "merge_fil": [5, 32, 35], "input_fil": [5, 32, 35], "merg": [5, 32, 35, 82, 103], "result": [5, 32, 35, 80, 82, 92, 103], "option": [5, 24, 32, 35, 69, 77, 78, 80, 84, 85, 86, 88, 90, 96, 103], "default": [5, 16, 24, 27, 32, 35, 36, 38, 69, 77, 78, 80, 81, 82, 84, 86, 88, 90, 96], "current": [5, 32, 35, 40, 80, 101, 103], "rais": [5, 16, 32, 69, 88, 93], "notimplementederror": [5, 32], "If": [5, 16, 30, 32, 35, 69, 77, 80, 84, 101, 103], "been": [5, 32, 82, 101], "backend": [5, 32, 35], "question": 5, "get_map_funct": 5, "nb_file": 5, "map": [5, 15, 16, 35, 36, 52, 53, 88, 90, 
91, 93], "pure": [5, 13, 14, 16, 29], "multiprocess": [5, 103], "tupl": [5, 28, 29, 77, 78, 86, 95], "parquet_dataset": [7, 9], "sqlite_dataset": [7, 11], "collect": [13, 14, 26, 82, 99], "i3fram": [13, 14, 16, 28, 29], "frame": [13, 14, 16, 26, 29, 30, 35], "i3extractorcollect": [13, 14], "i3featureextractoricecube86": [13, 15], "i3featureextractoricecubedeepcor": [13, 15], "i3featureextractoricecubeupgrad": [13, 15], "i3pulsenoisetruthflagicecubeupgrad": [13, 15], "i3galacticplanehybridrecoextractor": [13, 17], "i3ntmuonlabelextractor": [13, 18], "i3splinempeicextractor": [13, 23], "inform": [14, 16, 24, 78], "should": [14, 27, 40, 82, 88, 90, 91, 93, 101, 103], "__call__": 14, "icetrai": [14, 28, 29, 97], "keep": 14, "proven": 14, "tabl": [14, 35, 36, 51, 77, 84], "set_fil": 14, "store": [14, 36, 77, 81], "refer": [14, 53, 90], "being": 14, "get": [14, 28, 51, 80, 103], "multipl": [14, 80, 90, 98], "treat": 14, "singl": [14, 81, 90, 91], "pulsemap": [15, 35, 90], "puls": [15, 16, 28, 29, 35, 36], "seri": [15, 16, 28, 29, 36], "86": [15, 52], "nois": [15, 28], "flag": 15, "ad": [15, 77], "kei": [16, 27, 28, 29, 35, 36, 81, 90, 91], "exclude_kei": 16, "dynam": 16, "pars": [16, 78, 85, 86, 87, 88, 93], "call": [16, 29, 35, 77, 80, 84, 98], "tri": [16, 29], "automat": [16, 82, 101], "cast": [16, 29], "done": [16, 98, 101], "recurs": [16, 29, 92, 96], "each": [16, 27, 29, 36, 38, 39, 52, 53, 77, 78, 80, 96], "look": [16, 103], "member": [16, 29, 90, 91, 98], "variabl": [16, 29, 84, 98], "signatur": [16, 29], "similar": [16, 29, 103], "dict": [16, 27, 29, 35, 51, 52, 53, 69, 77, 78, 80, 86, 88, 90, 91, 92, 93, 95], "handl": [16, 82, 86, 95, 98], "hand": 16, "case": [16, 103], "per": [16, 36, 82, 84], "mc": [16, 35, 36], "tree": [16, 35], "trigger": 16, "exclud": [16, 38, 103], "valueerror": [16, 69], "hybrid": 17, "galatict": 17, "plane": [17, 82], "tum": [18, 25], "dnn": [18, 25], "padding_valu": [18, 21], "northeren": 18, "i3particl": 19, "other": [19, 36, 82, 101], "algorithm": 19, "comparison": [19, 82], "quantiti": 20, "select": [21, 40, 84, 90, 101], "queso": 21, "retro": 22, "splinemp": 23, "border": 24, "mctree": [24, 28], "ndarrai": [24, 84], "arrai": [24, 27], "boundari": 24, "volum": 24, "coordin": [24, 51], "particl": [24, 36, 81], "start": [24, 101, 103], "stop": [24, 80, 86], "within": 24, "hard": 24, "i3mctre": 24, "valu": [24, 27, 35, 36, 78, 81, 82, 86, 88], "flatten_nested_dictionari": [26, 27], "serialis": [26, 27], "transpose_list_of_dict": [26, 27], "frame_is_montecarlo": [26, 28], "frame_is_nois": [26, 28], "get_om_keys_and_pulseseri": [26, 28], "is_boost_enum": [26, 29], "is_boost_class": [26, 29], "is_icecube_class": [26, 29], "is_typ": [26, 29], "is_method": [26, 29], "break_cyclic_recurs": [26, 29], "get_member_vari": [26, 29], "cast_object_to_pure_python": [26, 29], "cast_pulse_series_to_pure_python": [26, 29], "manipul": 27, "obj": [27, 29, 92], "parent_kei": 27, "flatten": 27, "nest": 27, "dictionari": [27, 28, 29, 35, 77, 78, 88, 90, 91, 93], "non": [27, 29, 35, 36, 82], "exampl": [27, 40, 82, 90, 91, 103], "d": [27, 101], "b": 27, "c": [27, 82, 103], "2": [27, 77, 78, 82, 90, 103], "a__b": 27, "applic": 27, "combin": [27, 90], "parent": 27, "__": [27, 29], "concaten": 27, "nester": 27, "json": [27, 90], "therefor": 27, "we": [27, 29, 40, 101, 103], "element": [27, 29, 92], "outer": 27, "abl": [27, 103], "de": 27, "transpos": 27, "check": [28, 29, 30, 35, 36, 86, 96, 97, 101, 103], "whether": [28, 29, 35, 36, 69, 82, 92, 96, 97], "mont": 28, "carlo": 28, "simul": 
28, "bool": [28, 29, 30, 35, 36, 40, 69, 77, 80, 82, 84, 86, 92, 95, 96, 97, 98], "pulseseri": 28, "calibr": [28, 29], "indici": [28, 40, 82], "gcd_dict": [28, 29], "p": [28, 35, 82], "om": [28, 29], "dataclass": 28, "i3calibr": 28, "indicesfor": 28, "boost": 29, "enum": 29, "fn": [29, 88, 92], "ensur": [29, 39, 82, 98, 101, 103], "isn": 29, "return_discard": 29, "valid": [29, 40, 80, 82, 86, 88, 93], "ignor": 29, "mangl": 29, "take": [29, 35, 101], "mainli": 29, "cannot": [29, 88, 93], "trivial": 29, "doe": [29, 91], "try": 29, "length": [29, 80], "equival": 29, "its": 29, "like": [29, 82, 99, 101], "otherwis": [29, 82], "itself": 29, "deem": 29, "wai": [29, 40, 101, 103], "represent": 29, "optic": 29, "found": [29, 82], "class_nam": [30, 91, 98], "log_fold": [30, 98], "kwarg": [30, 51, 52, 53, 69, 80, 82, 84, 88, 90, 91, 98], "skip": 30, "null": [30, 36], "filter_nam": 30, "filter_ani": 30, "filtermask": 30, "initi": 30, "arg": [30, 51, 52, 53, 69, 82, 86, 88, 98], "true": [30, 35, 36, 77, 80, 82, 84, 90, 91, 93, 96], "kept": 30, "present": [30, 86, 96, 97], "fals": [30, 69, 77, 80, 82, 84, 90], "parquetdataconvert": [31, 32], "sqlitedataconvert": [34, 35, 103], "construct_datafram": [34, 35], "is_pulse_map": [34, 35], "is_mc_tre": [34, 35], "database_exist": [34, 36], "database_table_exist": [34, 36], "run_sql_cod": [34, 36], "save_to_sql": [34, 36], "attach_index": [34, 36], "create_t": [34, 36], "create_table_and_save_to_sql": [34, 36], "db": 35, "databas": [35, 36, 38, 77, 84, 103], "max_table_s": 35, "maximum": [35, 86], "row": [35, 36], "given": [35, 84, 86], "exce": 35, "limit": [35, 82], "creat": [35, 36, 88, 89, 93, 101, 103], "any_pulsemap_is_non_empti": 35, "data_dict": 35, "empti": 35, "retriev": [35, 51], "splitinicepuls": 35, "least": [35, 101, 103], "becaus": [35, 39], "instead": [35, 82], "alwai": 35, "panda": [35, 40, 84], "datafram": [35, 36, 40, 51, 77, 84], "table_nam": [35, 36], "database_path": [36, 77, 84], "df": 36, "must": [36, 80, 84, 101], "alreadi": [36, 103], "attach": 36, "queri": [36, 40], "column": [36, 51, 77, 84], "default_typ": 36, "integer_primary_kei": 36, "event_no": [36, 40, 84, 90], "NOT": [36, 82], "integ": [36, 82], "primari": 36, "Such": 36, "uniqu": [36, 38, 90], "appropri": 36, "expect": [36, 40], "doesn": 36, "parquettosqliteconvert": [37, 38], "pairwise_shuffl": [37, 39], "stringselectionresolv": [37, 40], "parquet_path": 38, "mc_truth_tabl": 38, "excluded_field": 38, "assign": [38, 101], "id": [38, 51], "everi": [38, 103], "field": [38, 78, 81, 88, 90, 91, 93, 95], "One": [38, 78], "choos": 38, "argument": [38, 80, 84, 86, 88, 90, 91, 93], "exclude_field": 38, "database_nam": 38, "convers": [38, 103], "directori": [38, 77, 80, 96], "rng": 39, "relat": [39, 96], "i3_list": [39, 96], "gcd_list": [39, 96], "shuffl": 39, "correpond": 39, "handi": 39, "even": 39, "files_list": 39, "gcd_shuffl": 39, "i3_shuffl": 39, "resolv": 40, "indic": [40, 80, 86, 101], "seed": [40, 90], "use_cach": 40, "datasetconfig": [40, 87, 90], "flexibl": 40, "defin": [40, 88, 90, 91, 93], "below": [40, 78, 84, 101, 103], "show": [40, 80], "involv": 40, "cover": 40, "yml": [40, 86, 90, 91], "test": [40, 90, 97, 101], "50000": [40, 90], "ab": [40, 82, 90], "12": [40, 90], "14": [40, 90], "16": [40, 90], "13": [40, 103], "10000": 40, "compat": 40, "syntax": [40, 82], "mai": [40, 103], "also": [40, 90], "specifi": [40, 78, 80, 103], "fix": 40, "randomli": [40, 91], "20": [40, 98], "graphnet_modul": [41, 42], "convnet": [45, 54], "dynedg": [45, 54], "dynedge_jinst": [45, 
54], "dynedge_kaggle_tito": [45, 54], "edg": [45, 60], "node": [45, 60], "graph_definit": [45, 60, 90], "standardis": 50, "icecubekaggl": [50, 52], "icecubedeepcor": [50, 52], "icecubeupgrad": [50, 52], "orca150": [50, 53], "ins": 51, "feature_map": [51, 52, 53], "assum": 51, "forward": [51, 82], "input_featur": 51, "input_feature_nam": 51, "adjac": 51, "tensor": [51, 82, 95, 99], "geometry_t": [51, 52, 53], "public": [51, 84], "geometri": 51, "string_index_nam": 51, "sensor_position_nam": 51, "xyz": [51, 52, 53], "sensor_index_nam": 51, "sensor": 51, "geometry_table_path": [52, 53], "home": [52, 53, 86, 103], "runner": [52, 53, 86], "local": [52, 53, 86], "lib": [52, 53, 86, 103], "python3": [52, 53, 86], "string_id_column": [52, 53], "sensor_id_column": [52, 53], "sensor_id": [52, 53], "dimens": [52, 53, 82], "competit": 52, "icecube_upgrad": 52, "prototyp": 53, "orca_150": 53, "sensor_string_id": 53, "minkowski": [60, 61], "lightningmodul": [69, 80, 98], "entir": 69, "classmethod": [69, 82, 88, 89], "save_state_dict": 69, "state_dict": [69, 95], "load_state_dict": 69, "karg": 69, "from_config": [69, 89, 90, 91], "trust": 69, "load_modul": 69, "modelconfig": [69, 87, 90, 91], "enough": 69, "eval": [69, 103], "lambda": 69, "definit": [69, 101], "consequ": 69, "config_updat": [76, 77], "weightfitt": [76, 77, 79, 84], "contourfitt": [76, 77], "read_entri": [76, 78], "plot_2d_contour": [76, 78], "plot_1d_contour": [76, 78], "contour": [77, 78], "config_path": 77, "new_config_path": 77, "dummy_sect": 77, "updat": [77, 80], "temp": 77, "dummi": 77, "section": 77, "header": 77, "configupdat": 77, "programat": 77, "truth_tabl": [77, 84, 90], "statistical_fit": 77, "weight": [77, 82, 84, 91, 103], "fit_weight": [77, 84], "config_outdir": 77, "weight_nam": [77, 84], "pisa_config_dict": 77, "add_to_databas": [77, 84], "flux": 77, "self": [77, 88, 93], "_database_path": 77, "statist": 77, "effect": [77, 80, 101], "account": 77, "systemat": 77, "hypersurfac": 77, "chang": [77, 82, 101], "assumpt": 77, "regard": 77, "two": [77, 80, 82], "pipeline_path": 77, "post_fix": 77, "model_nam": 77, "include_retro": 77, "fit_1d_contour": 77, "run_nam": 77, "config_dict": 77, "grid_siz": 77, "n_worker": 77, "theta23_minmax": 77, "36": 77, "54": 77, "dm31_minmax": 77, "3": [77, 78, 82, 101, 103], "7": 77, "1d": [77, 78], "float": [77, 78, 80, 82, 90], "fit_2d_contour": 77, "2d": [77, 78, 82], "entri": [78, 86], "content": 78, "contour_data": 78, "xlim": 78, "4": 78, "6": 78, "ylim": 78, "0023799999999999997": 78, "0025499999999999997": 78, "chi2_critical_valu": 78, "width": 78, "height": 78, "path_to_pisa_fit_result": 78, "name_of_my_model_in_fit": 78, "legend": 78, "color": 78, "linestyl": 78, "style": [78, 101], "line": [78, 80, 86], "upper": 78, "axi": 78, "605": 78, "critic": [78, 98], "chi2": 78, "90": 78, "cl": 78, "note": [78, 91], "right": [78, 82], "176": 78, "inch": 78, "388": 78, "706": 78, "abov": [78, 82, 84, 103], "352": 78, "piecewiselinearlr": [79, 80], "progressbar": [79, 80], "graphnetearlystop": [79, 80], "lossfunct": [79, 82], "mseloss": [79, 82], "rmseloss": [79, 82], "logcoshloss": [79, 82], "crossentropyloss": [79, 82], "binarycrossentropyloss": [79, 82], "logcmk": [79, 82], "vonmisesfisherloss": [79, 82], "vonmisesfisher2dloss": [79, 82], "euclideandistanceloss": [79, 82], "vonmisesfisher3dloss": [79, 82], "uniform": [79, 84], "bjoernlow": [79, 84], "dure": 80, "optim": 80, "mileston": 80, "factor": 80, "last_epoch": 80, "verbos": 80, "_lrschedul": 80, "interpol": 80, "linearli": 80, 
"between": [80, 82, 90, 91], "denot": 80, "step": 80, "multipli": 80, "closest": 80, "befor": 80, "vice": 80, "versa": 80, "after": [80, 86, 90], "last": 80, "wrap": [80, 90, 91], "epoch": [80, 86], "print": [80, 98], "messag": [80, 98], "stdout": 80, "get_lr": 80, "refresh_r": 80, "process_posit": 80, "tqdmprogressbar": 80, "custom": 80, "progress": 80, "bar": 80, "customis": 80, "pytorch": [80, 103], "lightn": 80, "init_validation_tqdm": 80, "overrid": 80, "init_predict_tqdm": 80, "init_test_tqdm": 80, "init_train_tqdm": 80, "get_metr": 80, "trainer": 80, "version": [80, 101, 103], "on_train_epoch_start": 80, "previou": 80, "see": [80, 101, 103], "loss": [80, 82, 86], "metric": 80, "behaviour": 80, "overwrit": 80, "on_train_epoch_end": 80, "don": [80, 103], "duplciat": 80, "save_dir": 80, "earlystop": 80, "earli": [80, 86], "keyword": [80, 88, 93], "pass": [80, 82, 84, 101], "pytorch_lightn": [80, 98], "setup": [80, 103], "graphnet_model": 80, "stage": 80, "pl": 80, "on_validation_end": 80, "on_fit_end": 80, "runtim": [81, 103], "produc": [81, 84], "where": 81, "That": 81, "azimuth_kei": 81, "zenith_kei": 81, "access": [81, 103], "azimiuth": 81, "angl": 81, "calcul": [81, 82], "target": [82, 93], "return_el": 82, "shape": 82, "n": 82, "elementwis": 82, "term": 82, "altern": [82, 101], "averag": 82, "scalar": 82, "squar": 82, "error": [82, 98, 101], "root": [82, 103], "cosh": 82, "act": 82, "small": 82, "comput": [82, 88, 90, 91, 93], "cross": 82, "entropi": 82, "num_class": 82, "matrix": 82, "logit": 82, "softmax": 82, "ed": 82, "probabl": 82, "binari": 82, "vector": 82, "mit": 82, "licens": 82, "copyright": 82, "2019": 82, "max": [82, 86], "ryabinin": 82, "permiss": 82, "herebi": 82, "person": 82, "obtain": 82, "copi": 82, "associ": 82, "document": 82, "deal": 82, "restrict": 82, "modifi": 82, "publish": 82, "distribut": [82, 84], "sublicens": 82, "sell": 82, "permit": 82, "whom": 82, "furnish": 82, "so": [82, 103], "subject": 82, "condit": 82, "notic": 82, "shall": 82, "substanti": 82, "portion": 82, "THE": 82, "AS": 82, "warranti": 82, "OF": 82, "kind": 82, "OR": 82, "impli": 82, "BUT": 82, "TO": 82, "merchant": 82, "FOR": 82, "particular": [82, 101], "AND": 82, "noninfring": 82, "IN": 82, "NO": 82, "author": 82, "holder": 82, "BE": 82, "liabl": 82, "claim": 82, "damag": 82, "liabil": 82, "action": 82, "contract": 82, "tort": 82, "aris": 82, "out": [82, 98, 101, 103], "WITH": 82, "_____________________": 82, "http": [82, 101], "github": [82, 103], "com": [82, 103], "mryab": 82, "vmf_loss": 82, "blob": 82, "master": 82, "py": [82, 103], "bessel": 82, "exponenti": 82, "scale": 82, "ditto": 82, "iv": 82, "1812": 82, "04616": 82, "spite": 82, "suggest": 82, "sec": 82, "8": [82, 101, 103], "paper": 82, "exact": 82, "m": 82, "correct": 82, "static": [82, 101], "ctx": 82, "kappa": 82, "backward": 82, "grad_output": 82, "von": 82, "mise": 82, "fisher": 82, "log_cmk_exact": 82, "c_": 82, "k": 82, "exactli": [82, 98], "log_cmk_approx": 82, "approx": 82, "arxiv": 82, "org": [82, 103], "addit": [82, 84], "minu": 82, "sign": 82, "log_cmk": 82, "kappa_switch": 82, "sinc": 82, "diverg": 82, "700": 82, "float64": 82, "precis": 82, "unaccur": 82, "switch": 82, "three": 82, "3d": 82, "uniformweightfitt": 84, "bin": 84, "privat": 84, "_fit_weight": 84, "sql": 84, "desir": [84, 96], "space": 84, "np": 84, "log10": 84, "happen": 84, "x_low": 84, "wherea": 84, "curv": 84, "base_config": [85, 87], "dataset_config": [85, 87], "model_config": [85, 87, 88, 90, 93], "training_config": [85, 87], "argumentpars": 
[85, 86], "rename_state_dict_entri": [85, 95], "is_gcd_fil": [85, 96], "is_i3_fil": [85, 96], "has_extens": [85, 96], "find_i3_fil": [85, 96], "has_icecube_packag": [85, 97], "has_torch_packag": [85, 97], "has_pisa_packag": [85, 97], "requires_icecub": [85, 97], "repeatfilt": [85, 98], "eps_lik": [85, 99], "consist": [86, 98, 101], "cli": 86, "pop_default": 86, "remov": 86, "usag": 86, "descript": 86, "command": [86, 103], "standard_argu": 86, "size": 86, "128": 86, "help": [86, 101], "training_example_data_sqlit": 86, "patienc": 86, "gpu": [86, 103], "narg": 86, "50": 86, "example_energy_reconstruction_model": 86, "num": 86, "fetch": 86, "with_standard_argu": 86, "add": [86, 95, 101, 103], "overwritten": [86, 88], "baseconfig": [87, 88, 89, 90, 91, 93], "get_all_argument_valu": [87, 88], "save_dataset_config": [87, 90], "datasetconfigsavermeta": [87, 90], "datasetconfigsaverabcmeta": [87, 90], "save_model_config": [87, 91], "modelconfigsavermeta": [87, 91], "modelconfigsaverabc": [87, 91], "traverse_and_appli": [87, 92], "list_all_submodul": [87, 92], "get_all_grapnet_class": [87, 92], "is_graphnet_modul": [87, 92], "is_graphnet_class": [87, 92], "get_graphnet_class": [87, 92], "trainingconfig": [87, 93], "basemodel": [88, 90, 91], "validationerror": [88, 93], "pydantic_cor": [88, 93], "explicitli": [88, 93], "posit": [88, 93], "dump": [88, 90, 91], "yaml": [88, 89], "as_dict": [88, 90, 91], "repres": [88, 90, 91], "model_computed_field": [88, 90, 91, 93], "classvar": [88, 90, 91, 93], "computedfieldinfo": [88, 90, 91, 93], "configdict": [88, 90, 91, 93], "conform": [88, 90, 91, 93], "pydant": [88, 90, 91, 93], "model_field": [88, 90, 91, 93], "fieldinfo": [88, 90, 91, 93], "metadata": [88, 90, 91, 93], "about": [88, 90, 91, 93], "__fields__": [88, 90, 91, 93], "v1": [88, 90, 91, 93, 103], "re": [89, 103], "save_config": 89, "node_truth": 90, "node_truth_t": 90, "string_select": 90, "loss_weight_t": 90, "loss_weight_column": 90, "loss_weight_default_valu": 90, "dataconfig": 90, "transpar": [90, 91, 101], "reproduc": [90, 91], "In": [90, 91, 103], "session": [90, 91], "anoth": [90, 91], "you": [90, 91, 101, 103], "ensembledataset": 90, "still": 90, "csv": 90, "train_select": 90, "test_select": 90, "unambigu": [90, 91], "annot": [90, 91, 93], "nonetyp": 90, "init_fn": [90, 91], "__init__": [90, 91, 103], "metaclass": [90, 91], "abcmeta": [90, 91], "datasetconfigsav": 90, "trainabl": 91, "hyperparamet": 91, "instanti": 91, "initialis": 91, "thu": 91, "modelconfigsav": 91, "fn_kwarg": 92, "structur": 92, "moduletyp": 92, "grapnet": 92, "lookup": 92, "early_stopping_pati": 93, "deprec": 95, "transit": 95, "old_phras": 95, "new_phras": 95, "deepcopi": 95, "who": 95, "renam": 95, "phrase": 95, "place": [95, 101], "system": [96, 103], "filenam": 96, "dir": 96, "search": 96, "test_funct": 97, "repeat": 98, "nb_repeats_allow": 98, "record": 98, "logrecord": 98, "clear": 98, "intuit": 98, "composit": 98, "rather": 98, "loggeradapt": 98, "chosen": 98, "avoid": [98, 101], "clash": 98, "setlevel": 98, "deleg": 98, "msg": 98, "warn": 98, "info": [98, 103], "debug": 98, "warning_onc": 98, "onc": 98, "handler": 98, "file_handl": 98, "filehandl": 98, "stream_handl": 98, "streamhandl": 98, "assort": 99, "ep": 99, "dtype": 99, "api": 100, "To": [101, 103], "sure": [101, 103], "smooth": 101, "guidelin": 101, "guid": 101, "encourag": 101, "contributor": 101, "discuss": 101, "bug": 101, "anyth": 101, "describ": 101, "yourself": 101, "ownership": 101, "activ": [101, 103], "prioriti": 101, "situat": 101, "lot": 
101, "effort": 101, "go": 101, "turn": 101, "outsid": 101, "scope": 101, "solut": 101, "better": 101, "fork": 101, "repo": 101, "dedic": 101, "branch": [101, 103], "your": [101, 103], "repositori": 101, "graphdefinit": 101, "euclidean": 101, "own": [101, 103], "team": 101, "accept": 101, "autom": 101, "review": 101, "pep8": 101, "docstr": 101, "googl": 101, "hint": 101, "clean": [101, 103], "adher": 101, "pep": 101, "pylint": 101, "flake8": 101, "black": 101, "well": 101, "recommend": [101, 103], "mypi": 101, "pydocstyl": 101, "docformatt": 101, "commit": 101, "hook": 101, "instal": 101, "come": 101, "tag": [101, 103], "pip": [101, 103], "Then": 101, "everytim": 101, "pep257": 101, "concept": 101, "ljvmiranda921": 101, "io": 101, "notebook": 101, "2018": 101, "06": 101, "21": 101, "precommit": 101, "environ": 103, "virtual": 103, "anaconda": 103, "prove": 103, "instruct": 103, "want": 103, "part": 103, "achiev": 103, "bash": 103, "shell": 103, "cvmf": 103, "opensciencegrid": 103, "py3": 103, "v4": 103, "sh": 103, "rhel_7_x86_64": 103, "metaproject": 103, "env": 103, "alia": 103, "script": 103, "With": 103, "now": 103, "light": 103, "extra": 103, "geometr": 103, "just": 103, "won": 103, "later": 103, "r": 103, "torch_cpu": 103, "txt": 103, "cpu": 103, "torch_gpu": 103, "prefer": 103, "unix": 103, "git": 103, "clone": 103, "usernam": 103, "cd": 103, "conda": 103, "gcc_linux": 103, "64": 103, "gxx_linux": 103, "libgcc": 103, "cudatoolkit": 103, "11": 103, "forg": 103, "box": 103, "compil": 103, "gcc": 103, "date": 103, "possibli": 103, "cuda": 103, "toolkit": 103, "recent": 103, "omit": 103, "newer": 103, "export": 103, "ld_library_path": 103, "anaconda3": 103, "miniconda3": 103, "bashrc": 103, "librari": 103, "intend": 103, "consid": 103, "rm": 103, "asogaard": 103, "latest": 103, "dc423315742c": 103, "01_icetrai": 103, "01_convert_i3_fil": 103, "2023": 103, "01": 103, "24": 103, "41": 103, "27": 103, "write": 103, "graphnet_20230124": 103, "134127": 103, "46": 103, "convert_i3_fil": 103, "ic86": 103, "thread": 103, "100": 103, "00": 103, "79": 103, "42": 103, "26": 103, "413": 103, "88it": 103, "specialis": 103, "ones": 103, "push": 103, "vx": 103}, "objects": {"": [[1, 0, 0, "-", "graphnet"]], "graphnet": [[2, 0, 0, "-", "constants"], [3, 0, 0, "-", "data"], [41, 0, 0, "-", "deployment"], [76, 0, 0, "-", "pisa"], [79, 0, 0, "-", "training"], [85, 0, 0, "-", "utilities"]], "graphnet.data": [[4, 0, 0, "-", "constants"], [5, 0, 0, "-", "dataconverter"], [13, 0, 0, "-", "extractors"], [30, 0, 0, "-", "filters"], [31, 0, 0, "-", "parquet"], [34, 0, 0, "-", "sqlite"], [37, 0, 0, "-", "utilities"]], "graphnet.data.constants": [[4, 1, 1, "", "FEATURES"], [4, 1, 1, "", "TRUTH"]], "graphnet.data.constants.FEATURES": [[4, 2, 1, "", "DEEPCORE"], [4, 2, 1, "", "ICECUBE86"], [4, 2, 1, "", "KAGGLE"], [4, 2, 1, "", "PROMETHEUS"], [4, 2, 1, "", "UPGRADE"]], "graphnet.data.constants.TRUTH": [[4, 2, 1, "", "DEEPCORE"], [4, 2, 1, "", "ICECUBE86"], [4, 2, 1, "", "KAGGLE"], [4, 2, 1, "", "PROMETHEUS"], [4, 2, 1, "", "UPGRADE"]], "graphnet.data.dataconverter": [[5, 1, 1, "", "DataConverter"], [5, 1, 1, "", "FileSet"], [5, 5, 1, "", "cache_output_files"], [5, 5, 1, "", "init_global_index"]], "graphnet.data.dataconverter.DataConverter": [[5, 3, 1, "", "execute"], [5, 4, 1, "", "file_suffix"], [5, 3, 1, "", "get_map_function"], [5, 3, 1, "", "merge_files"], [5, 3, 1, "", "save_data"]], "graphnet.data.dataconverter.FileSet": [[5, 2, 1, "", "gcd_file"], [5, 2, 1, "", "i3_file"]], "graphnet.data.extractors": [[14, 0, 
0, "-", "i3extractor"], [15, 0, 0, "-", "i3featureextractor"], [16, 0, 0, "-", "i3genericextractor"], [17, 0, 0, "-", "i3hybridrecoextractor"], [18, 0, 0, "-", "i3ntmuonlabelsextractor"], [19, 0, 0, "-", "i3particleextractor"], [20, 0, 0, "-", "i3pisaextractor"], [21, 0, 0, "-", "i3quesoextractor"], [22, 0, 0, "-", "i3retroextractor"], [23, 0, 0, "-", "i3splinempeextractor"], [24, 0, 0, "-", "i3truthextractor"], [25, 0, 0, "-", "i3tumextractor"], [26, 0, 0, "-", "utilities"]], "graphnet.data.extractors.i3extractor": [[14, 1, 1, "", "I3Extractor"], [14, 1, 1, "", "I3ExtractorCollection"]], "graphnet.data.extractors.i3extractor.I3Extractor": [[14, 4, 1, "", "name"], [14, 3, 1, "", "set_files"]], "graphnet.data.extractors.i3extractor.I3ExtractorCollection": [[14, 3, 1, "", "set_files"]], "graphnet.data.extractors.i3featureextractor": [[15, 1, 1, "", "I3FeatureExtractor"], [15, 1, 1, "", "I3FeatureExtractorIceCube86"], [15, 1, 1, "", "I3FeatureExtractorIceCubeDeepCore"], [15, 1, 1, "", "I3FeatureExtractorIceCubeUpgrade"], [15, 1, 1, "", "I3PulseNoiseTruthFlagIceCubeUpgrade"]], "graphnet.data.extractors.i3genericextractor": [[16, 1, 1, "", "I3GenericExtractor"]], "graphnet.data.extractors.i3hybridrecoextractor": [[17, 1, 1, "", "I3GalacticPlaneHybridRecoExtractor"]], "graphnet.data.extractors.i3ntmuonlabelsextractor": [[18, 1, 1, "", "I3NTMuonLabelExtractor"]], "graphnet.data.extractors.i3particleextractor": [[19, 1, 1, "", "I3ParticleExtractor"]], "graphnet.data.extractors.i3pisaextractor": [[20, 1, 1, "", "I3PISAExtractor"]], "graphnet.data.extractors.i3quesoextractor": [[21, 1, 1, "", "I3QUESOExtractor"]], "graphnet.data.extractors.i3retroextractor": [[22, 1, 1, "", "I3RetroExtractor"]], "graphnet.data.extractors.i3splinempeextractor": [[23, 1, 1, "", "I3SplineMPEICExtractor"]], "graphnet.data.extractors.i3truthextractor": [[24, 1, 1, "", "I3TruthExtractor"]], "graphnet.data.extractors.i3tumextractor": [[25, 1, 1, "", "I3TUMExtractor"]], "graphnet.data.extractors.utilities": [[27, 0, 0, "-", "collections"], [28, 0, 0, "-", "frames"], [29, 0, 0, "-", "types"]], "graphnet.data.extractors.utilities.collections": [[27, 5, 1, "", "flatten_nested_dictionary"], [27, 5, 1, "", "serialise"], [27, 5, 1, "", "transpose_list_of_dicts"]], "graphnet.data.extractors.utilities.frames": [[28, 5, 1, "", "frame_is_montecarlo"], [28, 5, 1, "", "frame_is_noise"], [28, 5, 1, "", "get_om_keys_and_pulseseries"]], "graphnet.data.extractors.utilities.types": [[29, 5, 1, "", "break_cyclic_recursion"], [29, 5, 1, "", "cast_object_to_pure_python"], [29, 5, 1, "", "cast_pulse_series_to_pure_python"], [29, 5, 1, "", "get_member_variables"], [29, 5, 1, "", "is_boost_class"], [29, 5, 1, "", "is_boost_enum"], [29, 5, 1, "", "is_icecube_class"], [29, 5, 1, "", "is_method"], [29, 5, 1, "", "is_type"]], "graphnet.data.filters": [[30, 1, 1, "", "I3Filter"], [30, 1, 1, "", "I3FilterMask"], [30, 1, 1, "", "NullSplitI3Filter"]], "graphnet.data.parquet": [[32, 0, 0, "-", "parquet_dataconverter"]], "graphnet.data.parquet.parquet_dataconverter": [[32, 1, 1, "", "ParquetDataConverter"]], "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter": [[32, 2, 1, "", "file_suffix"], [32, 3, 1, "", "merge_files"], [32, 3, 1, "", "save_data"]], "graphnet.data.sqlite": [[35, 0, 0, "-", "sqlite_dataconverter"], [36, 0, 0, "-", "sqlite_utilities"]], "graphnet.data.sqlite.sqlite_dataconverter": [[35, 1, 1, "", "SQLiteDataConverter"], [35, 5, 1, "", "construct_dataframe"], [35, 5, 1, "", "is_mc_tree"], [35, 5, 1, "", "is_pulse_map"]], 
"graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter": [[35, 3, 1, "", "any_pulsemap_is_non_empty"], [35, 2, 1, "", "file_suffix"], [35, 3, 1, "", "merge_files"], [35, 3, 1, "", "save_data"]], "graphnet.data.sqlite.sqlite_utilities": [[36, 5, 1, "", "attach_index"], [36, 5, 1, "", "create_table"], [36, 5, 1, "", "create_table_and_save_to_sql"], [36, 5, 1, "", "database_exists"], [36, 5, 1, "", "database_table_exists"], [36, 5, 1, "", "run_sql_code"], [36, 5, 1, "", "save_to_sql"]], "graphnet.data.utilities": [[38, 0, 0, "-", "parquet_to_sqlite"], [39, 0, 0, "-", "random"], [40, 0, 0, "-", "string_selection_resolver"]], "graphnet.data.utilities.parquet_to_sqlite": [[38, 1, 1, "", "ParquetToSQLiteConverter"]], "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter": [[38, 3, 1, "", "run"]], "graphnet.data.utilities.random": [[39, 5, 1, "", "pairwise_shuffle"]], "graphnet.data.utilities.string_selection_resolver": [[40, 1, 1, "", "StringSelectionResolver"]], "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver": [[40, 3, 1, "", "resolve"]], "graphnet.models": [[50, 0, 0, "-", "detector"], [69, 0, 0, "-", "model"]], "graphnet.models.detector": [[51, 0, 0, "-", "detector"], [52, 0, 0, "-", "icecube"], [53, 0, 0, "-", "prometheus"]], "graphnet.models.detector.detector": [[51, 1, 1, "", "Detector"]], "graphnet.models.detector.detector.Detector": [[51, 3, 1, "", "feature_map"], [51, 3, 1, "", "forward"], [51, 4, 1, "", "geometry_table"], [51, 4, 1, "", "sensor_index_name"], [51, 4, 1, "", "sensor_position_names"], [51, 4, 1, "", "string_index_name"]], "graphnet.models.detector.icecube": [[52, 1, 1, "", "IceCube86"], [52, 1, 1, "", "IceCubeDeepCore"], [52, 1, 1, "", "IceCubeKaggle"], [52, 1, 1, "", "IceCubeUpgrade"]], "graphnet.models.detector.icecube.IceCube86": [[52, 3, 1, "", "feature_map"], [52, 2, 1, "", "geometry_table_path"], [52, 2, 1, "", "sensor_id_column"], [52, 2, 1, "", "string_id_column"], [52, 2, 1, "", "xyz"]], "graphnet.models.detector.icecube.IceCubeDeepCore": [[52, 3, 1, "", "feature_map"]], "graphnet.models.detector.icecube.IceCubeKaggle": [[52, 3, 1, "", "feature_map"]], "graphnet.models.detector.icecube.IceCubeUpgrade": [[52, 3, 1, "", "feature_map"], [52, 2, 1, "", "geometry_table_path"], [52, 2, 1, "", "sensor_id_column"], [52, 2, 1, "", "string_id_column"], [52, 2, 1, "", "xyz"]], "graphnet.models.detector.prometheus": [[53, 1, 1, "", "ORCA150"], [53, 1, 1, "", "Prometheus"]], "graphnet.models.detector.prometheus.ORCA150": [[53, 3, 1, "", "feature_map"], [53, 2, 1, "", "geometry_table_path"], [53, 2, 1, "", "sensor_id_column"], [53, 2, 1, "", "string_id_column"], [53, 2, 1, "", "xyz"]], "graphnet.models.model": [[69, 1, 1, "", "Model"]], "graphnet.models.model.Model": [[69, 3, 1, "", "from_config"], [69, 3, 1, "", "load"], [69, 3, 1, "", "load_state_dict"], [69, 3, 1, "", "save"], [69, 3, 1, "", "save_state_dict"]], "graphnet.pisa": [[77, 0, 0, "-", "fitting"], [78, 0, 0, "-", "plotting"]], "graphnet.pisa.fitting": [[77, 1, 1, "", "ContourFitter"], [77, 1, 1, "", "WeightFitter"], [77, 5, 1, "", "config_updater"]], "graphnet.pisa.fitting.ContourFitter": [[77, 3, 1, "", "fit_1d_contour"], [77, 3, 1, "", "fit_2d_contour"]], "graphnet.pisa.fitting.WeightFitter": [[77, 3, 1, "", "fit_weights"]], "graphnet.pisa.plotting": [[78, 5, 1, "", "plot_1D_contour"], [78, 5, 1, "", "plot_2D_contour"], [78, 5, 1, "", "read_entry"]], "graphnet.training": [[80, 0, 0, "-", "callbacks"], [81, 0, 0, "-", "labels"], [82, 0, 0, "-", "loss_functions"], 
[84, 0, 0, "-", "weight_fitting"]], "graphnet.training.callbacks": [[80, 1, 1, "", "GraphnetEarlyStopping"], [80, 1, 1, "", "PiecewiseLinearLR"], [80, 1, 1, "", "ProgressBar"]], "graphnet.training.callbacks.GraphnetEarlyStopping": [[80, 3, 1, "", "on_fit_end"], [80, 3, 1, "", "on_train_epoch_end"], [80, 3, 1, "", "on_validation_end"], [80, 3, 1, "", "setup"]], "graphnet.training.callbacks.PiecewiseLinearLR": [[80, 3, 1, "", "get_lr"]], "graphnet.training.callbacks.ProgressBar": [[80, 3, 1, "", "get_metrics"], [80, 3, 1, "", "init_predict_tqdm"], [80, 3, 1, "", "init_test_tqdm"], [80, 3, 1, "", "init_train_tqdm"], [80, 3, 1, "", "init_validation_tqdm"], [80, 3, 1, "", "on_train_epoch_end"], [80, 3, 1, "", "on_train_epoch_start"]], "graphnet.training.labels": [[81, 1, 1, "", "Direction"], [81, 1, 1, "", "Label"]], "graphnet.training.labels.Label": [[81, 4, 1, "", "key"]], "graphnet.training.loss_functions": [[82, 1, 1, "", "BinaryCrossEntropyLoss"], [82, 1, 1, "", "CrossEntropyLoss"], [82, 1, 1, "", "EuclideanDistanceLoss"], [82, 1, 1, "", "LogCMK"], [82, 1, 1, "", "LogCoshLoss"], [82, 1, 1, "", "LossFunction"], [82, 1, 1, "", "MSELoss"], [82, 1, 1, "", "RMSELoss"], [82, 1, 1, "", "VonMisesFisher2DLoss"], [82, 1, 1, "", "VonMisesFisher3DLoss"], [82, 1, 1, "", "VonMisesFisherLoss"]], "graphnet.training.loss_functions.LogCMK": [[82, 3, 1, "", "backward"], [82, 3, 1, "", "forward"]], "graphnet.training.loss_functions.LossFunction": [[82, 3, 1, "", "forward"]], "graphnet.training.loss_functions.VonMisesFisherLoss": [[82, 3, 1, "", "log_cmk"], [82, 3, 1, "", "log_cmk_approx"], [82, 3, 1, "", "log_cmk_exact"]], "graphnet.training.weight_fitting": [[84, 1, 1, "", "BjoernLow"], [84, 1, 1, "", "Uniform"], [84, 1, 1, "", "WeightFitter"]], "graphnet.training.weight_fitting.WeightFitter": [[84, 3, 1, "", "fit"]], "graphnet.utilities": [[86, 0, 0, "-", "argparse"], [87, 0, 0, "-", "config"], [94, 0, 0, "-", "decorators"], [95, 0, 0, "-", "deprecation_tools"], [96, 0, 0, "-", "filesys"], [97, 0, 0, "-", "imports"], [98, 0, 0, "-", "logging"], [99, 0, 0, "-", "maths"]], "graphnet.utilities.argparse": [[86, 1, 1, "", "ArgumentParser"], [86, 1, 1, "", "Options"]], "graphnet.utilities.argparse.ArgumentParser": [[86, 2, 1, "", "standard_arguments"], [86, 3, 1, "", "with_standard_arguments"]], "graphnet.utilities.argparse.Options": [[86, 3, 1, "", "contains"], [86, 3, 1, "", "pop_default"]], "graphnet.utilities.config": [[88, 0, 0, "-", "base_config"], [89, 0, 0, "-", "configurable"], [90, 0, 0, "-", "dataset_config"], [91, 0, 0, "-", "model_config"], [92, 0, 0, "-", "parsing"], [93, 0, 0, "-", "training_config"]], "graphnet.utilities.config.base_config": [[88, 1, 1, "", "BaseConfig"], [88, 5, 1, "", "get_all_argument_values"]], "graphnet.utilities.config.base_config.BaseConfig": [[88, 3, 1, "", "as_dict"], [88, 3, 1, "", "dump"], [88, 3, 1, "", "load"], [88, 2, 1, "", "model_computed_fields"], [88, 2, 1, "", "model_config"], [88, 2, 1, "", "model_fields"]], "graphnet.utilities.config.configurable": [[89, 1, 1, "", "Configurable"]], "graphnet.utilities.config.configurable.Configurable": [[89, 4, 1, "", "config"], [89, 3, 1, "", "from_config"], [89, 3, 1, "", "save_config"]], "graphnet.utilities.config.dataset_config": [[90, 1, 1, "", "DatasetConfig"], [90, 1, 1, "", "DatasetConfigSaverABCMeta"], [90, 1, 1, "", "DatasetConfigSaverMeta"], [90, 5, 1, "", "save_dataset_config"]], "graphnet.utilities.config.dataset_config.DatasetConfig": [[90, 3, 1, "", "as_dict"], [90, 2, 1, "", "features"], [90, 2, 1, "", 
"graph_definition"], [90, 2, 1, "", "index_column"], [90, 2, 1, "", "loss_weight_column"], [90, 2, 1, "", "loss_weight_default_value"], [90, 2, 1, "", "loss_weight_table"], [90, 2, 1, "", "model_computed_fields"], [90, 2, 1, "", "model_config"], [90, 2, 1, "", "model_fields"], [90, 2, 1, "", "node_truth"], [90, 2, 1, "", "node_truth_table"], [90, 2, 1, "", "path"], [90, 2, 1, "", "pulsemaps"], [90, 2, 1, "", "seed"], [90, 2, 1, "", "selection"], [90, 2, 1, "", "string_selection"], [90, 2, 1, "", "truth"], [90, 2, 1, "", "truth_table"]], "graphnet.utilities.config.model_config": [[91, 1, 1, "", "ModelConfig"], [91, 1, 1, "", "ModelConfigSaverABC"], [91, 1, 1, "", "ModelConfigSaverMeta"], [91, 5, 1, "", "save_model_config"]], "graphnet.utilities.config.model_config.ModelConfig": [[91, 2, 1, "", "arguments"], [91, 3, 1, "", "as_dict"], [91, 2, 1, "", "class_name"], [91, 2, 1, "", "model_computed_fields"], [91, 2, 1, "", "model_config"], [91, 2, 1, "", "model_fields"]], "graphnet.utilities.config.parsing": [[92, 5, 1, "", "get_all_grapnet_classes"], [92, 5, 1, "", "get_graphnet_classes"], [92, 5, 1, "", "is_graphnet_class"], [92, 5, 1, "", "is_graphnet_module"], [92, 5, 1, "", "list_all_submodules"], [92, 5, 1, "", "traverse_and_apply"]], "graphnet.utilities.config.training_config": [[93, 1, 1, "", "TrainingConfig"]], "graphnet.utilities.config.training_config.TrainingConfig": [[93, 2, 1, "", "dataloader"], [93, 2, 1, "", "early_stopping_patience"], [93, 2, 1, "", "fit"], [93, 2, 1, "", "model_computed_fields"], [93, 2, 1, "", "model_config"], [93, 2, 1, "", "model_fields"], [93, 2, 1, "", "target"]], "graphnet.utilities.deprecation_tools": [[95, 5, 1, "", "rename_state_dict_entries"]], "graphnet.utilities.filesys": [[96, 5, 1, "", "find_i3_files"], [96, 5, 1, "", "has_extension"], [96, 5, 1, "", "is_gcd_file"], [96, 5, 1, "", "is_i3_file"]], "graphnet.utilities.imports": [[97, 5, 1, "", "has_icecube_package"], [97, 5, 1, "", "has_pisa_package"], [97, 5, 1, "", "has_torch_package"], [97, 5, 1, "", "requires_icecube"]], "graphnet.utilities.logging": [[98, 1, 1, "", "Logger"], [98, 1, 1, "", "RepeatFilter"]], "graphnet.utilities.logging.Logger": [[98, 3, 1, "", "critical"], [98, 3, 1, "", "debug"], [98, 3, 1, "", "error"], [98, 4, 1, "", "file_handlers"], [98, 4, 1, "", "handlers"], [98, 3, 1, "", "info"], [98, 3, 1, "", "setLevel"], [98, 4, 1, "", "stream_handlers"], [98, 3, 1, "", "warning"], [98, 3, 1, "", "warning_once"]], "graphnet.utilities.logging.RepeatFilter": [[98, 3, 1, "", "filter"], [98, 2, 1, "", "nb_repeats_allowed"]], "graphnet.utilities.maths": [[99, 5, 1, "", "eps_like"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:attribute", "3": "py:method", "4": "py:property", "5": "py:function"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "attribute", "Python attribute"], "3": ["py", "method", "Python method"], "4": ["py", "property", "Python property"], "5": ["py", "function", "Python function"]}, "titleterms": {"about": [0, 102], "impact": [0, 102], "usag": [0, 102], "acknowledg": [0, 102], "api": 1, "constant": [2, 4], "data": 3, "dataconvert": 5, "dataload": 6, "dataset": [7, 8], "parquet": [9, 31], "parquet_dataset": 10, "sqlite": [11, 34], "sqlite_dataset": 12, "extractor": 13, "i3extractor": 14, "i3featureextractor": 15, "i3genericextractor": 16, "i3hybridrecoextractor": 17, "i3ntmuonlabelsextractor": 18, "i3particleextractor": 19, "i3pisaextractor": 20, "i3quesoextractor": 21, "i3retroextractor": 22, 
"i3splinempeextractor": 23, "i3truthextractor": 24, "i3tumextractor": 25, "util": [26, 37, 68, 75, 83, 85], "collect": 27, "frame": 28, "type": 29, "filter": 30, "parquet_dataconvert": 32, "pipelin": 33, "sqlite_dataconvert": 35, "sqlite_util": 36, "parquet_to_sqlit": 38, "random": 39, "string_selection_resolv": 40, "deploy": [41, 43], "i3modul": 42, "graphnet_modul": 44, "model": [45, 69], "coarsen": 46, "compon": 47, "layer": 48, "pool": 49, "detector": [50, 51], "icecub": 52, "prometheu": 53, "gnn": [54, 59], "convnet": 55, "dynedg": 56, "dynedge_jinst": 57, "dynedge_kaggle_tito": 58, "graph": [60, 65], "edg": [61, 62], "minkowski": 63, "graph_definit": 64, "node": [66, 67], "standard_model": 70, "task": [71, 74], "classif": 72, "reconstruct": 73, "pisa": 76, "fit": 77, "plot": 78, "train": 79, "callback": 80, "label": 81, "loss_funct": 82, "weight_fit": 84, "argpars": 86, "config": 87, "base_config": 88, "configur": 89, "dataset_config": 90, "model_config": 91, "pars": 92, "training_config": 93, "decor": 94, "deprecation_tool": 95, "filesi": 96, "import": 97, "log": 98, "math": 99, "src": 100, "contribut": 101, "github": 101, "issu": 101, "pull": 101, "request": 101, "convent": 101, "code": 101, "qualiti": 101, "instal": 103, "icetrai": 103, "stand": 103, "alon": 103, "run": 103, "docker": 103}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 60}, "alltitles": {"About": [[0, "about"], [102, "about"]], "Impact": [[0, "impact"], [102, "impact"]], "Usage": [[0, "usage"], [102, "usage"]], "Acknowledgements": [[0, "acknowledgements"], [102, "acknowledgements"]], "API": [[1, "module-graphnet"]], "constants": [[2, "module-graphnet.constants"], [4, "module-graphnet.data.constants"]], "data": [[3, "module-graphnet.data"]], "dataconverter": [[5, "module-graphnet.data.dataconverter"]], "dataloader": [[6, "dataloader"]], "dataset": [[7, "dataset"], [8, "dataset"]], "parquet": [[9, "parquet"], [31, "module-graphnet.data.parquet"]], "parquet_dataset": [[10, "parquet-dataset"]], "sqlite": [[11, "sqlite"], [34, "module-graphnet.data.sqlite"]], "sqlite_dataset": [[12, "sqlite-dataset"]], "extractors": [[13, "module-graphnet.data.extractors"]], "i3extractor": [[14, "module-graphnet.data.extractors.i3extractor"]], "i3featureextractor": [[15, "module-graphnet.data.extractors.i3featureextractor"]], "i3genericextractor": [[16, "module-graphnet.data.extractors.i3genericextractor"]], "i3hybridrecoextractor": [[17, "module-graphnet.data.extractors.i3hybridrecoextractor"]], "i3ntmuonlabelsextractor": [[18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"]], "i3particleextractor": [[19, "module-graphnet.data.extractors.i3particleextractor"]], "i3pisaextractor": [[20, "module-graphnet.data.extractors.i3pisaextractor"]], "i3quesoextractor": [[21, "module-graphnet.data.extractors.i3quesoextractor"]], "i3retroextractor": [[22, "module-graphnet.data.extractors.i3retroextractor"]], "i3splinempeextractor": [[23, "module-graphnet.data.extractors.i3splinempeextractor"]], "i3truthextractor": [[24, "module-graphnet.data.extractors.i3truthextractor"]], "i3tumextractor": [[25, "module-graphnet.data.extractors.i3tumextractor"]], "utilities": [[26, "module-graphnet.data.extractors.utilities"], [37, 
"module-graphnet.data.utilities"], [85, "module-graphnet.utilities"]], "collections": [[27, "module-graphnet.data.extractors.utilities.collections"]], "frames": [[28, "module-graphnet.data.extractors.utilities.frames"]], "types": [[29, "module-graphnet.data.extractors.utilities.types"]], "filters": [[30, "module-graphnet.data.filters"]], "parquet_dataconverter": [[32, "module-graphnet.data.parquet.parquet_dataconverter"]], "pipeline": [[33, "pipeline"]], "sqlite_dataconverter": [[35, "module-graphnet.data.sqlite.sqlite_dataconverter"]], "sqlite_utilities": [[36, "module-graphnet.data.sqlite.sqlite_utilities"]], "parquet_to_sqlite": [[38, "module-graphnet.data.utilities.parquet_to_sqlite"]], "random": [[39, "module-graphnet.data.utilities.random"]], "string_selection_resolver": [[40, "module-graphnet.data.utilities.string_selection_resolver"]], "deployment": [[41, "module-graphnet.deployment"]], "i3modules": [[42, "i3modules"]], "deployer": [[43, "deployer"]], "graphnet_module": [[44, "graphnet-module"]], "models": [[45, "models"]], "coarsening": [[46, "coarsening"]], "components": [[47, "components"]], "layers": [[48, "layers"]], "pool": [[49, "pool"]], "detector": [[50, "module-graphnet.models.detector"], [51, "module-graphnet.models.detector.detector"]], "icecube": [[52, "module-graphnet.models.detector.icecube"]], "prometheus": [[53, "module-graphnet.models.detector.prometheus"]], "gnn": [[54, "gnn"], [59, "gnn"]], "convnet": [[55, "convnet"]], "dynedge": [[56, "dynedge"]], "dynedge_jinst": [[57, "dynedge-jinst"]], "dynedge_kaggle_tito": [[58, "dynedge-kaggle-tito"]], "graphs": [[60, "graphs"], [65, "graphs"]], "edges": [[61, "edges"], [62, "edges"]], "minkowski": [[63, "minkowski"]], "graph_definition": [[64, "graph-definition"]], "nodes": [[66, "nodes"], [67, "nodes"]], "utils": [[68, "utils"], [75, "utils"], [83, "utils"]], "model": [[69, "module-graphnet.models.model"]], "standard_model": [[70, "standard-model"]], "task": [[71, "task"], [74, "task"]], "classification": [[72, "classification"]], "reconstruction": [[73, "reconstruction"]], "pisa": [[76, "module-graphnet.pisa"]], "fitting": [[77, "module-graphnet.pisa.fitting"]], "plotting": [[78, "module-graphnet.pisa.plotting"]], "training": [[79, "module-graphnet.training"]], "callbacks": [[80, "module-graphnet.training.callbacks"]], "labels": [[81, "module-graphnet.training.labels"]], "loss_functions": [[82, "module-graphnet.training.loss_functions"]], "weight_fitting": [[84, "module-graphnet.training.weight_fitting"]], "argparse": [[86, "module-graphnet.utilities.argparse"]], "config": [[87, "module-graphnet.utilities.config"]], "base_config": [[88, "module-graphnet.utilities.config.base_config"]], "configurable": [[89, "module-graphnet.utilities.config.configurable"]], "dataset_config": [[90, "module-graphnet.utilities.config.dataset_config"]], "model_config": [[91, "module-graphnet.utilities.config.model_config"]], "parsing": [[92, "module-graphnet.utilities.config.parsing"]], "training_config": [[93, "module-graphnet.utilities.config.training_config"]], "decorators": [[94, "module-graphnet.utilities.decorators"]], "deprecation_tools": [[95, "module-graphnet.utilities.deprecation_tools"]], "filesys": [[96, "module-graphnet.utilities.filesys"]], "imports": [[97, "module-graphnet.utilities.imports"]], "logging": [[98, "module-graphnet.utilities.logging"]], "maths": [[99, "module-graphnet.utilities.maths"]], "src": [[100, "src"]], "Contribute": [[101, "contribute"]], "GitHub issues": [[101, "github-issues"]], "Pull requests": 
[[101, "pull-requests"]], "Conventions": [[101, "conventions"]], "Code quality": [[101, "code-quality"]], "Install": [[103, "install"]], "Installing with IceTray": [[103, "installing-with-icetray"]], "Installing stand-alone": [[103, "installing-stand-alone"]], "Running in Docker": [[103, "running-in-docker"]]}, "indexentries": {"graphnet": [[1, "module-graphnet"]], "module": [[1, "module-graphnet"], [2, "module-graphnet.constants"], [3, "module-graphnet.data"], [4, "module-graphnet.data.constants"], [5, "module-graphnet.data.dataconverter"], [13, "module-graphnet.data.extractors"], [14, "module-graphnet.data.extractors.i3extractor"], [15, "module-graphnet.data.extractors.i3featureextractor"], [16, "module-graphnet.data.extractors.i3genericextractor"], [17, "module-graphnet.data.extractors.i3hybridrecoextractor"], [18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"], [19, "module-graphnet.data.extractors.i3particleextractor"], [20, "module-graphnet.data.extractors.i3pisaextractor"], [21, "module-graphnet.data.extractors.i3quesoextractor"], [22, "module-graphnet.data.extractors.i3retroextractor"], [23, "module-graphnet.data.extractors.i3splinempeextractor"], [24, "module-graphnet.data.extractors.i3truthextractor"], [25, "module-graphnet.data.extractors.i3tumextractor"], [26, "module-graphnet.data.extractors.utilities"], [27, "module-graphnet.data.extractors.utilities.collections"], [28, "module-graphnet.data.extractors.utilities.frames"], [29, "module-graphnet.data.extractors.utilities.types"], [30, "module-graphnet.data.filters"], [31, "module-graphnet.data.parquet"], [32, "module-graphnet.data.parquet.parquet_dataconverter"], [34, "module-graphnet.data.sqlite"], [35, "module-graphnet.data.sqlite.sqlite_dataconverter"], [36, "module-graphnet.data.sqlite.sqlite_utilities"], [37, "module-graphnet.data.utilities"], [38, "module-graphnet.data.utilities.parquet_to_sqlite"], [39, "module-graphnet.data.utilities.random"], [40, "module-graphnet.data.utilities.string_selection_resolver"], [41, "module-graphnet.deployment"], [50, "module-graphnet.models.detector"], [51, "module-graphnet.models.detector.detector"], [52, "module-graphnet.models.detector.icecube"], [53, "module-graphnet.models.detector.prometheus"], [69, "module-graphnet.models.model"], [76, "module-graphnet.pisa"], [77, "module-graphnet.pisa.fitting"], [78, "module-graphnet.pisa.plotting"], [79, "module-graphnet.training"], [80, "module-graphnet.training.callbacks"], [81, "module-graphnet.training.labels"], [82, "module-graphnet.training.loss_functions"], [84, "module-graphnet.training.weight_fitting"], [85, "module-graphnet.utilities"], [86, "module-graphnet.utilities.argparse"], [87, "module-graphnet.utilities.config"], [88, "module-graphnet.utilities.config.base_config"], [89, "module-graphnet.utilities.config.configurable"], [90, "module-graphnet.utilities.config.dataset_config"], [91, "module-graphnet.utilities.config.model_config"], [92, "module-graphnet.utilities.config.parsing"], [93, "module-graphnet.utilities.config.training_config"], [94, "module-graphnet.utilities.decorators"], [95, "module-graphnet.utilities.deprecation_tools"], [96, "module-graphnet.utilities.filesys"], [97, "module-graphnet.utilities.imports"], [98, "module-graphnet.utilities.logging"], [99, "module-graphnet.utilities.maths"]], "graphnet.constants": [[2, "module-graphnet.constants"]], "graphnet.data": [[3, "module-graphnet.data"]], "deepcore (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.DEEPCORE"]], 
"deepcore (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.DEEPCORE"]], "features (class in graphnet.data.constants)": [[4, "graphnet.data.constants.FEATURES"]], "icecube86 (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.ICECUBE86"]], "icecube86 (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.ICECUBE86"]], "kaggle (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.KAGGLE"]], "kaggle (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.KAGGLE"]], "prometheus (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.PROMETHEUS"]], "prometheus (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.PROMETHEUS"]], "truth (class in graphnet.data.constants)": [[4, "graphnet.data.constants.TRUTH"]], "upgrade (graphnet.data.constants.features attribute)": [[4, "graphnet.data.constants.FEATURES.UPGRADE"]], "upgrade (graphnet.data.constants.truth attribute)": [[4, "graphnet.data.constants.TRUTH.UPGRADE"]], "graphnet.data.constants": [[4, "module-graphnet.data.constants"]], "dataconverter (class in graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.DataConverter"]], "fileset (class in graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.FileSet"]], "cache_output_files() (in module graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.cache_output_files"]], "execute() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.execute"]], "file_suffix (graphnet.data.dataconverter.dataconverter property)": [[5, "graphnet.data.dataconverter.DataConverter.file_suffix"]], "gcd_file (graphnet.data.dataconverter.fileset attribute)": [[5, "graphnet.data.dataconverter.FileSet.gcd_file"]], "get_map_function() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.get_map_function"]], "graphnet.data.dataconverter": [[5, "module-graphnet.data.dataconverter"]], "i3_file (graphnet.data.dataconverter.fileset attribute)": [[5, "graphnet.data.dataconverter.FileSet.i3_file"]], "init_global_index() (in module graphnet.data.dataconverter)": [[5, "graphnet.data.dataconverter.init_global_index"]], "merge_files() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.merge_files"]], "save_data() (graphnet.data.dataconverter.dataconverter method)": [[5, "graphnet.data.dataconverter.DataConverter.save_data"]], "graphnet.data.extractors": [[13, "module-graphnet.data.extractors"]], "i3extractor (class in graphnet.data.extractors.i3extractor)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor"]], "i3extractorcollection (class in graphnet.data.extractors.i3extractor)": [[14, "graphnet.data.extractors.i3extractor.I3ExtractorCollection"]], "graphnet.data.extractors.i3extractor": [[14, "module-graphnet.data.extractors.i3extractor"]], "name (graphnet.data.extractors.i3extractor.i3extractor property)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor.name"]], "set_files() (graphnet.data.extractors.i3extractor.i3extractor method)": [[14, "graphnet.data.extractors.i3extractor.I3Extractor.set_files"]], "set_files() (graphnet.data.extractors.i3extractor.i3extractorcollection method)": [[14, "graphnet.data.extractors.i3extractor.I3ExtractorCollection.set_files"]], "i3featureextractor (class in graphnet.data.extractors.i3featureextractor)": [[15, 
"graphnet.data.extractors.i3featureextractor.I3FeatureExtractor"]], "i3featureextractoricecube86 (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCube86"]], "i3featureextractoricecubedeepcore (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCubeDeepCore"]], "i3featureextractoricecubeupgrade (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3FeatureExtractorIceCubeUpgrade"]], "i3pulsenoisetruthflagicecubeupgrade (class in graphnet.data.extractors.i3featureextractor)": [[15, "graphnet.data.extractors.i3featureextractor.I3PulseNoiseTruthFlagIceCubeUpgrade"]], "graphnet.data.extractors.i3featureextractor": [[15, "module-graphnet.data.extractors.i3featureextractor"]], "i3genericextractor (class in graphnet.data.extractors.i3genericextractor)": [[16, "graphnet.data.extractors.i3genericextractor.I3GenericExtractor"]], "graphnet.data.extractors.i3genericextractor": [[16, "module-graphnet.data.extractors.i3genericextractor"]], "i3galacticplanehybridrecoextractor (class in graphnet.data.extractors.i3hybridrecoextractor)": [[17, "graphnet.data.extractors.i3hybridrecoextractor.I3GalacticPlaneHybridRecoExtractor"]], "graphnet.data.extractors.i3hybridrecoextractor": [[17, "module-graphnet.data.extractors.i3hybridrecoextractor"]], "i3ntmuonlabelextractor (class in graphnet.data.extractors.i3ntmuonlabelsextractor)": [[18, "graphnet.data.extractors.i3ntmuonlabelsextractor.I3NTMuonLabelExtractor"]], "graphnet.data.extractors.i3ntmuonlabelsextractor": [[18, "module-graphnet.data.extractors.i3ntmuonlabelsextractor"]], "i3particleextractor (class in graphnet.data.extractors.i3particleextractor)": [[19, "graphnet.data.extractors.i3particleextractor.I3ParticleExtractor"]], "graphnet.data.extractors.i3particleextractor": [[19, "module-graphnet.data.extractors.i3particleextractor"]], "i3pisaextractor (class in graphnet.data.extractors.i3pisaextractor)": [[20, "graphnet.data.extractors.i3pisaextractor.I3PISAExtractor"]], "graphnet.data.extractors.i3pisaextractor": [[20, "module-graphnet.data.extractors.i3pisaextractor"]], "i3quesoextractor (class in graphnet.data.extractors.i3quesoextractor)": [[21, "graphnet.data.extractors.i3quesoextractor.I3QUESOExtractor"]], "graphnet.data.extractors.i3quesoextractor": [[21, "module-graphnet.data.extractors.i3quesoextractor"]], "i3retroextractor (class in graphnet.data.extractors.i3retroextractor)": [[22, "graphnet.data.extractors.i3retroextractor.I3RetroExtractor"]], "graphnet.data.extractors.i3retroextractor": [[22, "module-graphnet.data.extractors.i3retroextractor"]], "i3splinempeicextractor (class in graphnet.data.extractors.i3splinempeextractor)": [[23, "graphnet.data.extractors.i3splinempeextractor.I3SplineMPEICExtractor"]], "graphnet.data.extractors.i3splinempeextractor": [[23, "module-graphnet.data.extractors.i3splinempeextractor"]], "i3truthextractor (class in graphnet.data.extractors.i3truthextractor)": [[24, "graphnet.data.extractors.i3truthextractor.I3TruthExtractor"]], "graphnet.data.extractors.i3truthextractor": [[24, "module-graphnet.data.extractors.i3truthextractor"]], "i3tumextractor (class in graphnet.data.extractors.i3tumextractor)": [[25, "graphnet.data.extractors.i3tumextractor.I3TUMExtractor"]], "graphnet.data.extractors.i3tumextractor": [[25, "module-graphnet.data.extractors.i3tumextractor"]], "graphnet.data.extractors.utilities": [[26, 
"module-graphnet.data.extractors.utilities"]], "flatten_nested_dictionary() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.flatten_nested_dictionary"]], "graphnet.data.extractors.utilities.collections": [[27, "module-graphnet.data.extractors.utilities.collections"]], "serialise() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.serialise"]], "transpose_list_of_dicts() (in module graphnet.data.extractors.utilities.collections)": [[27, "graphnet.data.extractors.utilities.collections.transpose_list_of_dicts"]], "frame_is_montecarlo() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.frame_is_montecarlo"]], "frame_is_noise() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.frame_is_noise"]], "get_om_keys_and_pulseseries() (in module graphnet.data.extractors.utilities.frames)": [[28, "graphnet.data.extractors.utilities.frames.get_om_keys_and_pulseseries"]], "graphnet.data.extractors.utilities.frames": [[28, "module-graphnet.data.extractors.utilities.frames"]], "break_cyclic_recursion() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.break_cyclic_recursion"]], "cast_object_to_pure_python() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.cast_object_to_pure_python"]], "cast_pulse_series_to_pure_python() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.cast_pulse_series_to_pure_python"]], "get_member_variables() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.get_member_variables"]], "graphnet.data.extractors.utilities.types": [[29, "module-graphnet.data.extractors.utilities.types"]], "is_boost_class() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_boost_class"]], "is_boost_enum() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_boost_enum"]], "is_icecube_class() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_icecube_class"]], "is_method() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_method"]], "is_type() (in module graphnet.data.extractors.utilities.types)": [[29, "graphnet.data.extractors.utilities.types.is_type"]], "i3filter (class in graphnet.data.filters)": [[30, "graphnet.data.filters.I3Filter"]], "i3filtermask (class in graphnet.data.filters)": [[30, "graphnet.data.filters.I3FilterMask"]], "nullspliti3filter (class in graphnet.data.filters)": [[30, "graphnet.data.filters.NullSplitI3Filter"]], "graphnet.data.filters": [[30, "module-graphnet.data.filters"]], "graphnet.data.parquet": [[31, "module-graphnet.data.parquet"]], "parquetdataconverter (class in graphnet.data.parquet.parquet_dataconverter)": [[32, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter"]], "file_suffix (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter attribute)": [[32, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.file_suffix"]], "graphnet.data.parquet.parquet_dataconverter": [[32, "module-graphnet.data.parquet.parquet_dataconverter"]], "merge_files() 
(graphnet.data.parquet.parquet_dataconverter.parquetdataconverter method)": [[32, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.merge_files"]], "save_data() (graphnet.data.parquet.parquet_dataconverter.parquetdataconverter method)": [[32, "graphnet.data.parquet.parquet_dataconverter.ParquetDataConverter.save_data"]], "graphnet.data.sqlite": [[34, "module-graphnet.data.sqlite"]], "sqlitedataconverter (class in graphnet.data.sqlite.sqlite_dataconverter)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter"]], "any_pulsemap_is_non_empty() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.any_pulsemap_is_non_empty"]], "construct_dataframe() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.construct_dataframe"]], "file_suffix (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter attribute)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.file_suffix"]], "graphnet.data.sqlite.sqlite_dataconverter": [[35, "module-graphnet.data.sqlite.sqlite_dataconverter"]], "is_mc_tree() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.is_mc_tree"]], "is_pulse_map() (in module graphnet.data.sqlite.sqlite_dataconverter)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.is_pulse_map"]], "merge_files() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.merge_files"]], "save_data() (graphnet.data.sqlite.sqlite_dataconverter.sqlitedataconverter method)": [[35, "graphnet.data.sqlite.sqlite_dataconverter.SQLiteDataConverter.save_data"]], "attach_index() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.attach_index"]], "create_table() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.create_table"]], "create_table_and_save_to_sql() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.create_table_and_save_to_sql"]], "database_exists() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.database_exists"]], "database_table_exists() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.database_table_exists"]], "graphnet.data.sqlite.sqlite_utilities": [[36, "module-graphnet.data.sqlite.sqlite_utilities"]], "run_sql_code() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.run_sql_code"]], "save_to_sql() (in module graphnet.data.sqlite.sqlite_utilities)": [[36, "graphnet.data.sqlite.sqlite_utilities.save_to_sql"]], "graphnet.data.utilities": [[37, "module-graphnet.data.utilities"]], "parquettosqliteconverter (class in graphnet.data.utilities.parquet_to_sqlite)": [[38, "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter"]], "graphnet.data.utilities.parquet_to_sqlite": [[38, "module-graphnet.data.utilities.parquet_to_sqlite"]], "run() (graphnet.data.utilities.parquet_to_sqlite.parquettosqliteconverter method)": [[38, "graphnet.data.utilities.parquet_to_sqlite.ParquetToSQLiteConverter.run"]], "graphnet.data.utilities.random": [[39, "module-graphnet.data.utilities.random"]], "pairwise_shuffle() (in module graphnet.data.utilities.random)": [[39, 
"graphnet.data.utilities.random.pairwise_shuffle"]], "stringselectionresolver (class in graphnet.data.utilities.string_selection_resolver)": [[40, "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver"]], "graphnet.data.utilities.string_selection_resolver": [[40, "module-graphnet.data.utilities.string_selection_resolver"]], "resolve() (graphnet.data.utilities.string_selection_resolver.stringselectionresolver method)": [[40, "graphnet.data.utilities.string_selection_resolver.StringSelectionResolver.resolve"]], "graphnet.deployment": [[41, "module-graphnet.deployment"]], "graphnet.models.detector": [[50, "module-graphnet.models.detector"]], "detector (class in graphnet.models.detector.detector)": [[51, "graphnet.models.detector.detector.Detector"]], "feature_map() (graphnet.models.detector.detector.detector method)": [[51, "graphnet.models.detector.detector.Detector.feature_map"]], "forward() (graphnet.models.detector.detector.detector method)": [[51, "graphnet.models.detector.detector.Detector.forward"]], "geometry_table (graphnet.models.detector.detector.detector property)": [[51, "graphnet.models.detector.detector.Detector.geometry_table"]], "graphnet.models.detector.detector": [[51, "module-graphnet.models.detector.detector"]], "sensor_index_name (graphnet.models.detector.detector.detector property)": [[51, "graphnet.models.detector.detector.Detector.sensor_index_name"]], "sensor_position_names (graphnet.models.detector.detector.detector property)": [[51, "graphnet.models.detector.detector.Detector.sensor_position_names"]], "string_index_name (graphnet.models.detector.detector.detector property)": [[51, "graphnet.models.detector.detector.Detector.string_index_name"]], "icecube86 (class in graphnet.models.detector.icecube)": [[52, "graphnet.models.detector.icecube.IceCube86"]], "icecubedeepcore (class in graphnet.models.detector.icecube)": [[52, "graphnet.models.detector.icecube.IceCubeDeepCore"]], "icecubekaggle (class in graphnet.models.detector.icecube)": [[52, "graphnet.models.detector.icecube.IceCubeKaggle"]], "icecubeupgrade (class in graphnet.models.detector.icecube)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade"]], "feature_map() (graphnet.models.detector.icecube.icecube86 method)": [[52, "graphnet.models.detector.icecube.IceCube86.feature_map"]], "feature_map() (graphnet.models.detector.icecube.icecubedeepcore method)": [[52, "graphnet.models.detector.icecube.IceCubeDeepCore.feature_map"]], "feature_map() (graphnet.models.detector.icecube.icecubekaggle method)": [[52, "graphnet.models.detector.icecube.IceCubeKaggle.feature_map"]], "feature_map() (graphnet.models.detector.icecube.icecubeupgrade method)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.feature_map"]], "geometry_table_path (graphnet.models.detector.icecube.icecube86 attribute)": [[52, "graphnet.models.detector.icecube.IceCube86.geometry_table_path"]], "geometry_table_path (graphnet.models.detector.icecube.icecubeupgrade attribute)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.geometry_table_path"]], "graphnet.models.detector.icecube": [[52, "module-graphnet.models.detector.icecube"]], "sensor_id_column (graphnet.models.detector.icecube.icecube86 attribute)": [[52, "graphnet.models.detector.icecube.IceCube86.sensor_id_column"]], "sensor_id_column (graphnet.models.detector.icecube.icecubeupgrade attribute)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.sensor_id_column"]], "string_id_column (graphnet.models.detector.icecube.icecube86 attribute)": [[52, 
"graphnet.models.detector.icecube.IceCube86.string_id_column"]], "string_id_column (graphnet.models.detector.icecube.icecubeupgrade attribute)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.string_id_column"]], "xyz (graphnet.models.detector.icecube.icecube86 attribute)": [[52, "graphnet.models.detector.icecube.IceCube86.xyz"]], "xyz (graphnet.models.detector.icecube.icecubeupgrade attribute)": [[52, "graphnet.models.detector.icecube.IceCubeUpgrade.xyz"]], "orca150 (class in graphnet.models.detector.prometheus)": [[53, "graphnet.models.detector.prometheus.ORCA150"]], "prometheus (class in graphnet.models.detector.prometheus)": [[53, "graphnet.models.detector.prometheus.Prometheus"]], "feature_map() (graphnet.models.detector.prometheus.orca150 method)": [[53, "graphnet.models.detector.prometheus.ORCA150.feature_map"]], "geometry_table_path (graphnet.models.detector.prometheus.orca150 attribute)": [[53, "graphnet.models.detector.prometheus.ORCA150.geometry_table_path"]], "graphnet.models.detector.prometheus": [[53, "module-graphnet.models.detector.prometheus"]], "sensor_id_column (graphnet.models.detector.prometheus.orca150 attribute)": [[53, "graphnet.models.detector.prometheus.ORCA150.sensor_id_column"]], "string_id_column (graphnet.models.detector.prometheus.orca150 attribute)": [[53, "graphnet.models.detector.prometheus.ORCA150.string_id_column"]], "xyz (graphnet.models.detector.prometheus.orca150 attribute)": [[53, "graphnet.models.detector.prometheus.ORCA150.xyz"]], "model (class in graphnet.models.model)": [[69, "graphnet.models.model.Model"]], "from_config() (graphnet.models.model.model class method)": [[69, "graphnet.models.model.Model.from_config"]], "graphnet.models.model": [[69, "module-graphnet.models.model"]], "load() (graphnet.models.model.model class method)": [[69, "graphnet.models.model.Model.load"]], "load_state_dict() (graphnet.models.model.model method)": [[69, "graphnet.models.model.Model.load_state_dict"]], "save() (graphnet.models.model.model method)": [[69, "graphnet.models.model.Model.save"]], "save_state_dict() (graphnet.models.model.model method)": [[69, "graphnet.models.model.Model.save_state_dict"]], "graphnet.pisa": [[76, "module-graphnet.pisa"]], "contourfitter (class in graphnet.pisa.fitting)": [[77, "graphnet.pisa.fitting.ContourFitter"]], "weightfitter (class in graphnet.pisa.fitting)": [[77, "graphnet.pisa.fitting.WeightFitter"]], "config_updater() (in module graphnet.pisa.fitting)": [[77, "graphnet.pisa.fitting.config_updater"]], "fit_1d_contour() (graphnet.pisa.fitting.contourfitter method)": [[77, "graphnet.pisa.fitting.ContourFitter.fit_1d_contour"]], "fit_2d_contour() (graphnet.pisa.fitting.contourfitter method)": [[77, "graphnet.pisa.fitting.ContourFitter.fit_2d_contour"]], "fit_weights() (graphnet.pisa.fitting.weightfitter method)": [[77, "graphnet.pisa.fitting.WeightFitter.fit_weights"]], "graphnet.pisa.fitting": [[77, "module-graphnet.pisa.fitting"]], "graphnet.pisa.plotting": [[78, "module-graphnet.pisa.plotting"]], "plot_1d_contour() (in module graphnet.pisa.plotting)": [[78, "graphnet.pisa.plotting.plot_1D_contour"]], "plot_2d_contour() (in module graphnet.pisa.plotting)": [[78, "graphnet.pisa.plotting.plot_2D_contour"]], "read_entry() (in module graphnet.pisa.plotting)": [[78, "graphnet.pisa.plotting.read_entry"]], "graphnet.training": [[79, "module-graphnet.training"]], "graphnetearlystopping (class in graphnet.training.callbacks)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping"]], "piecewiselinearlr (class in 
graphnet.training.callbacks)": [[80, "graphnet.training.callbacks.PiecewiseLinearLR"]], "progressbar (class in graphnet.training.callbacks)": [[80, "graphnet.training.callbacks.ProgressBar"]], "get_lr() (graphnet.training.callbacks.piecewiselinearlr method)": [[80, "graphnet.training.callbacks.PiecewiseLinearLR.get_lr"]], "get_metrics() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.get_metrics"]], "graphnet.training.callbacks": [[80, "module-graphnet.training.callbacks"]], "init_predict_tqdm() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.init_predict_tqdm"]], "init_test_tqdm() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.init_test_tqdm"]], "init_train_tqdm() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.init_train_tqdm"]], "init_validation_tqdm() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.init_validation_tqdm"]], "on_fit_end() (graphnet.training.callbacks.graphnetearlystopping method)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping.on_fit_end"]], "on_train_epoch_end() (graphnet.training.callbacks.graphnetearlystopping method)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping.on_train_epoch_end"]], "on_train_epoch_end() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.on_train_epoch_end"]], "on_train_epoch_start() (graphnet.training.callbacks.progressbar method)": [[80, "graphnet.training.callbacks.ProgressBar.on_train_epoch_start"]], "on_validation_end() (graphnet.training.callbacks.graphnetearlystopping method)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping.on_validation_end"]], "setup() (graphnet.training.callbacks.graphnetearlystopping method)": [[80, "graphnet.training.callbacks.GraphnetEarlyStopping.setup"]], "direction (class in graphnet.training.labels)": [[81, "graphnet.training.labels.Direction"]], "label (class in graphnet.training.labels)": [[81, "graphnet.training.labels.Label"]], "graphnet.training.labels": [[81, "module-graphnet.training.labels"]], "key (graphnet.training.labels.label property)": [[81, "graphnet.training.labels.Label.key"]], "binarycrossentropyloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.BinaryCrossEntropyLoss"]], "crossentropyloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.CrossEntropyLoss"]], "euclideandistanceloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.EuclideanDistanceLoss"]], "logcmk (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.LogCMK"]], "logcoshloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.LogCoshLoss"]], "lossfunction (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.LossFunction"]], "mseloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.MSELoss"]], "rmseloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.RMSELoss"]], "vonmisesfisher2dloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.VonMisesFisher2DLoss"]], "vonmisesfisher3dloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.VonMisesFisher3DLoss"]], 
"vonmisesfisherloss (class in graphnet.training.loss_functions)": [[82, "graphnet.training.loss_functions.VonMisesFisherLoss"]], "backward() (graphnet.training.loss_functions.logcmk static method)": [[82, "graphnet.training.loss_functions.LogCMK.backward"]], "forward() (graphnet.training.loss_functions.logcmk static method)": [[82, "graphnet.training.loss_functions.LogCMK.forward"]], "forward() (graphnet.training.loss_functions.lossfunction method)": [[82, "graphnet.training.loss_functions.LossFunction.forward"]], "graphnet.training.loss_functions": [[82, "module-graphnet.training.loss_functions"]], "log_cmk() (graphnet.training.loss_functions.vonmisesfisherloss class method)": [[82, "graphnet.training.loss_functions.VonMisesFisherLoss.log_cmk"]], "log_cmk_approx() (graphnet.training.loss_functions.vonmisesfisherloss class method)": [[82, "graphnet.training.loss_functions.VonMisesFisherLoss.log_cmk_approx"]], "log_cmk_exact() (graphnet.training.loss_functions.vonmisesfisherloss class method)": [[82, "graphnet.training.loss_functions.VonMisesFisherLoss.log_cmk_exact"]], "bjoernlow (class in graphnet.training.weight_fitting)": [[84, "graphnet.training.weight_fitting.BjoernLow"]], "uniform (class in graphnet.training.weight_fitting)": [[84, "graphnet.training.weight_fitting.Uniform"]], "weightfitter (class in graphnet.training.weight_fitting)": [[84, "graphnet.training.weight_fitting.WeightFitter"]], "fit() (graphnet.training.weight_fitting.weightfitter method)": [[84, "graphnet.training.weight_fitting.WeightFitter.fit"]], "graphnet.training.weight_fitting": [[84, "module-graphnet.training.weight_fitting"]], "graphnet.utilities": [[85, "module-graphnet.utilities"]], "argumentparser (class in graphnet.utilities.argparse)": [[86, "graphnet.utilities.argparse.ArgumentParser"]], "options (class in graphnet.utilities.argparse)": [[86, "graphnet.utilities.argparse.Options"]], "contains() (graphnet.utilities.argparse.options method)": [[86, "graphnet.utilities.argparse.Options.contains"]], "graphnet.utilities.argparse": [[86, "module-graphnet.utilities.argparse"]], "pop_default() (graphnet.utilities.argparse.options method)": [[86, "graphnet.utilities.argparse.Options.pop_default"]], "standard_arguments (graphnet.utilities.argparse.argumentparser attribute)": [[86, "graphnet.utilities.argparse.ArgumentParser.standard_arguments"]], "with_standard_arguments() (graphnet.utilities.argparse.argumentparser method)": [[86, "graphnet.utilities.argparse.ArgumentParser.with_standard_arguments"]], "graphnet.utilities.config": [[87, "module-graphnet.utilities.config"]], "baseconfig (class in graphnet.utilities.config.base_config)": [[88, "graphnet.utilities.config.base_config.BaseConfig"]], "as_dict() (graphnet.utilities.config.base_config.baseconfig method)": [[88, "graphnet.utilities.config.base_config.BaseConfig.as_dict"]], "dump() (graphnet.utilities.config.base_config.baseconfig method)": [[88, "graphnet.utilities.config.base_config.BaseConfig.dump"]], "get_all_argument_values() (in module graphnet.utilities.config.base_config)": [[88, "graphnet.utilities.config.base_config.get_all_argument_values"]], "graphnet.utilities.config.base_config": [[88, "module-graphnet.utilities.config.base_config"]], "load() (graphnet.utilities.config.base_config.baseconfig class method)": [[88, "graphnet.utilities.config.base_config.BaseConfig.load"]], "model_computed_fields (graphnet.utilities.config.base_config.baseconfig attribute)": [[88, "graphnet.utilities.config.base_config.BaseConfig.model_computed_fields"]], 
"model_config (graphnet.utilities.config.base_config.baseconfig attribute)": [[88, "graphnet.utilities.config.base_config.BaseConfig.model_config"]], "model_fields (graphnet.utilities.config.base_config.baseconfig attribute)": [[88, "graphnet.utilities.config.base_config.BaseConfig.model_fields"]], "configurable (class in graphnet.utilities.config.configurable)": [[89, "graphnet.utilities.config.configurable.Configurable"]], "config (graphnet.utilities.config.configurable.configurable property)": [[89, "graphnet.utilities.config.configurable.Configurable.config"]], "from_config() (graphnet.utilities.config.configurable.configurable class method)": [[89, "graphnet.utilities.config.configurable.Configurable.from_config"]], "graphnet.utilities.config.configurable": [[89, "module-graphnet.utilities.config.configurable"]], "save_config() (graphnet.utilities.config.configurable.configurable method)": [[89, "graphnet.utilities.config.configurable.Configurable.save_config"]], "datasetconfig (class in graphnet.utilities.config.dataset_config)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig"]], "datasetconfigsaverabcmeta (class in graphnet.utilities.config.dataset_config)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfigSaverABCMeta"]], "datasetconfigsavermeta (class in graphnet.utilities.config.dataset_config)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfigSaverMeta"]], "as_dict() (graphnet.utilities.config.dataset_config.datasetconfig method)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.as_dict"]], "features (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.features"]], "graph_definition (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.graph_definition"]], "graphnet.utilities.config.dataset_config": [[90, "module-graphnet.utilities.config.dataset_config"]], "index_column (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.index_column"]], "loss_weight_column (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_column"]], "loss_weight_default_value (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_default_value"]], "loss_weight_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.loss_weight_table"]], "model_computed_fields (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.model_computed_fields"]], "model_config (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.model_config"]], "model_fields (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.model_fields"]], "node_truth (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.node_truth"]], "node_truth_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.node_truth_table"]], "path 
(graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.path"]], "pulsemaps (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.pulsemaps"]], "save_dataset_config() (in module graphnet.utilities.config.dataset_config)": [[90, "graphnet.utilities.config.dataset_config.save_dataset_config"]], "seed (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.seed"]], "selection (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.selection"]], "string_selection (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.string_selection"]], "truth (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.truth"]], "truth_table (graphnet.utilities.config.dataset_config.datasetconfig attribute)": [[90, "graphnet.utilities.config.dataset_config.DatasetConfig.truth_table"]], "modelconfig (class in graphnet.utilities.config.model_config)": [[91, "graphnet.utilities.config.model_config.ModelConfig"]], "modelconfigsaverabc (class in graphnet.utilities.config.model_config)": [[91, "graphnet.utilities.config.model_config.ModelConfigSaverABC"]], "modelconfigsavermeta (class in graphnet.utilities.config.model_config)": [[91, "graphnet.utilities.config.model_config.ModelConfigSaverMeta"]], "arguments (graphnet.utilities.config.model_config.modelconfig attribute)": [[91, "graphnet.utilities.config.model_config.ModelConfig.arguments"]], "as_dict() (graphnet.utilities.config.model_config.modelconfig method)": [[91, "graphnet.utilities.config.model_config.ModelConfig.as_dict"]], "class_name (graphnet.utilities.config.model_config.modelconfig attribute)": [[91, "graphnet.utilities.config.model_config.ModelConfig.class_name"]], "graphnet.utilities.config.model_config": [[91, "module-graphnet.utilities.config.model_config"]], "model_computed_fields (graphnet.utilities.config.model_config.modelconfig attribute)": [[91, "graphnet.utilities.config.model_config.ModelConfig.model_computed_fields"]], "model_config (graphnet.utilities.config.model_config.modelconfig attribute)": [[91, "graphnet.utilities.config.model_config.ModelConfig.model_config"]], "model_fields (graphnet.utilities.config.model_config.modelconfig attribute)": [[91, "graphnet.utilities.config.model_config.ModelConfig.model_fields"]], "save_model_config() (in module graphnet.utilities.config.model_config)": [[91, "graphnet.utilities.config.model_config.save_model_config"]], "get_all_grapnet_classes() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.get_all_grapnet_classes"]], "get_graphnet_classes() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.get_graphnet_classes"]], "graphnet.utilities.config.parsing": [[92, "module-graphnet.utilities.config.parsing"]], "is_graphnet_class() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.is_graphnet_class"]], "is_graphnet_module() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.is_graphnet_module"]], "list_all_submodules() (in module graphnet.utilities.config.parsing)": [[92, 
"graphnet.utilities.config.parsing.list_all_submodules"]], "traverse_and_apply() (in module graphnet.utilities.config.parsing)": [[92, "graphnet.utilities.config.parsing.traverse_and_apply"]], "trainingconfig (class in graphnet.utilities.config.training_config)": [[93, "graphnet.utilities.config.training_config.TrainingConfig"]], "dataloader (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.dataloader"]], "early_stopping_patience (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.early_stopping_patience"]], "fit (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.fit"]], "graphnet.utilities.config.training_config": [[93, "module-graphnet.utilities.config.training_config"]], "model_computed_fields (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.model_computed_fields"]], "model_config (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.model_config"]], "model_fields (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.model_fields"]], "target (graphnet.utilities.config.training_config.trainingconfig attribute)": [[93, "graphnet.utilities.config.training_config.TrainingConfig.target"]], "graphnet.utilities.decorators": [[94, "module-graphnet.utilities.decorators"]], "graphnet.utilities.deprecation_tools": [[95, "module-graphnet.utilities.deprecation_tools"]], "rename_state_dict_entries() (in module graphnet.utilities.deprecation_tools)": [[95, "graphnet.utilities.deprecation_tools.rename_state_dict_entries"]], "find_i3_files() (in module graphnet.utilities.filesys)": [[96, "graphnet.utilities.filesys.find_i3_files"]], "graphnet.utilities.filesys": [[96, "module-graphnet.utilities.filesys"]], "has_extension() (in module graphnet.utilities.filesys)": [[96, "graphnet.utilities.filesys.has_extension"]], "is_gcd_file() (in module graphnet.utilities.filesys)": [[96, "graphnet.utilities.filesys.is_gcd_file"]], "is_i3_file() (in module graphnet.utilities.filesys)": [[96, "graphnet.utilities.filesys.is_i3_file"]], "graphnet.utilities.imports": [[97, "module-graphnet.utilities.imports"]], "has_icecube_package() (in module graphnet.utilities.imports)": [[97, "graphnet.utilities.imports.has_icecube_package"]], "has_pisa_package() (in module graphnet.utilities.imports)": [[97, "graphnet.utilities.imports.has_pisa_package"]], "has_torch_package() (in module graphnet.utilities.imports)": [[97, "graphnet.utilities.imports.has_torch_package"]], "requires_icecube() (in module graphnet.utilities.imports)": [[97, "graphnet.utilities.imports.requires_icecube"]], "logger (class in graphnet.utilities.logging)": [[98, "graphnet.utilities.logging.Logger"]], "repeatfilter (class in graphnet.utilities.logging)": [[98, "graphnet.utilities.logging.RepeatFilter"]], "critical() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.critical"]], "debug() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.debug"]], "error() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.error"]], "file_handlers 
(graphnet.utilities.logging.logger property)": [[98, "graphnet.utilities.logging.Logger.file_handlers"]], "filter() (graphnet.utilities.logging.repeatfilter method)": [[98, "graphnet.utilities.logging.RepeatFilter.filter"]], "graphnet.utilities.logging": [[98, "module-graphnet.utilities.logging"]], "handlers (graphnet.utilities.logging.logger property)": [[98, "graphnet.utilities.logging.Logger.handlers"]], "info() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.info"]], "nb_repeats_allowed (graphnet.utilities.logging.repeatfilter attribute)": [[98, "graphnet.utilities.logging.RepeatFilter.nb_repeats_allowed"]], "setlevel() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.setLevel"]], "stream_handlers (graphnet.utilities.logging.logger property)": [[98, "graphnet.utilities.logging.Logger.stream_handlers"]], "warning() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.warning"]], "warning_once() (graphnet.utilities.logging.logger method)": [[98, "graphnet.utilities.logging.Logger.warning_once"]], "eps_like() (in module graphnet.utilities.maths)": [[99, "graphnet.utilities.maths.eps_like"]], "graphnet.utilities.maths": [[99, "module-graphnet.utilities.maths"]]}})
\ No newline at end of file
diff --git a/sitemap.xml b/sitemap.xml
index c8d271b9e..fba7f8a92 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -1 +1 @@
-https://graphnet-team.github.io/graphnetabout.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.constants.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.constants.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataloader.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.parquet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.parquet.parquet_dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.sqlite.sqlite_dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3extractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3featureextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3genericextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3hybridrecoextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3ntmuonlabelsextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3particleextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3pisaextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3quesoextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3retroextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3splinempeextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3truthextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3tumextractor.html
https://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.collections.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.frames.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.types.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.filters.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.parquet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.parquet.parquet_dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.pipeline.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.sqlite_dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.sqlite_utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.parquet_to_sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.random.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.string_selection_resolver.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.i3modules.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.i3modules.deployer.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.i3modules.graphnet_module.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.coarsening.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.layers.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.pool.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.detector.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.icecube.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.prometheus.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.convnet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge_jinst.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge_kaggle_tito.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.gnn.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.edges.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.minkowski.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.graph_definition.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.graphs.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.nodes.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.nodes.nodes.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.model.htmlhttps://graphnet-team.github.io/gra
phnetapi/graphnet.models.standard_model.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.classification.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.reconstruction.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.task.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.fitting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.plotting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.callbacks.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.labels.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.loss_functions.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.weight_fitting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.argparse.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.base_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.configurable.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.dataset_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.model_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.parsing.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.training_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.decorators.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.deprecation_tools.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.filesys.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.imports.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.logging.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.maths.htmlhttps://graphnet-team.github.io/graphnetapi/modules.htmlhttps://graphnet-team.github.io/graphnetcontribute.htmlhttps://graphnet-team.github.io/graphnetindex.htmlhttps://graphnet-team.github.io/graphnetinstall.htmlhttps://graphnet-team.github.io/graphnetgenindex.htmlhttps://graphnet-team.github.io/graphnetpy-modindex.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/constants.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataloader.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataset/dataset.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataset/parquet/parquet_dataset.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataset/sqlite/sqlite_dataset.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3extractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3featureextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3genericextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3hybridrecoextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/
data/extractors/i3ntmuonlabelsextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3particleextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3pisaextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3quesoextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3retroextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3splinempeextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3truthextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3tumextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/collections.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/frames.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/types.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/filters.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/parquet/parquet_dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/pipeline.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/sqlite/sqlite_dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/sqlite/sqlite_utilities.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/parquet_to_sqlite.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/random.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/string_selection_resolver.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/deployment/i3modules/graphnet_module.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/coarsening.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/components/layers.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/components/pool.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/detector/detector.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/detector/icecube.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/detector/prometheus.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/convnet.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/dynedge.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/dynedge_jinst.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/dynedge_kaggle_tito.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/gnn/gnn.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/edges/edges.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/edges/minkowski.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/graph_definition.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/graphs.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/nodes/nodes.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/graphs/utils.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/model.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/standard_model.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/task/classification.htmlhttps://graphnet-team.github.io
/graphnet_modules/graphnet/models/task/reconstruction.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/task/task.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/utils.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/pisa/fitting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/pisa/plotting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/callbacks.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/labels.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/loss_functions.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/utils.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/weight_fitting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/argparse.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/base_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/configurable.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/dataset_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/model_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/parsing.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/training_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/deprecation_tools.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/filesys.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/imports.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/logging.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/maths.htmlhttps://graphnet-team.github.io/graphnet_modules/index.htmlhttps://graphnet-team.github.io/graphnetsearch.html
\ No newline at end of file
+https://graphnet-team.github.io/graphnetabout.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.constants.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.constants.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataloader.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.parquet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.parquet.parquet_dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.dataset.sqlite.sqlite_dataset.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3extractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3featureextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3genericextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3hybridrecoextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3ntmuonlabelsextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3particleextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3pisaextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3quesoextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3retroextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3splinempeextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3truthextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.i3tumextractor.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.collections.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.frames.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.extractors.utilities.types.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.filters.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.parquet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.parquet.parquet_dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.pipeline.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.sqlite_dataconverter.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.sqlite.sqlite_utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.parquet_to_sqlite.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.random.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.data.utilities.string_selection_resolver.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.i3modules.htmlhttps://graphnet-team.github.io/graphnetapi/graphn
et.deployment.i3modules.deployer.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.deployment.i3modules.graphnet_module.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.coarsening.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.layers.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.components.pool.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.detector.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.icecube.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.detector.prometheus.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.convnet.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge_jinst.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.dynedge_kaggle_tito.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.gnn.gnn.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.edges.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.edges.minkowski.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.graph_definition.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.graphs.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.nodes.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.nodes.nodes.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.graphs.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.model.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.standard_model.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.classification.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.reconstruction.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.task.task.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.models.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.fitting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.pisa.plotting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.callbacks.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.labels.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.loss_functions.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.utils.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.training.weight_fitting.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.argparse.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.base_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utiliti
es.config.configurable.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.dataset_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.model_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.parsing.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.config.training_config.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.decorators.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.deprecation_tools.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.filesys.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.imports.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.logging.htmlhttps://graphnet-team.github.io/graphnetapi/graphnet.utilities.maths.htmlhttps://graphnet-team.github.io/graphnetapi/modules.htmlhttps://graphnet-team.github.io/graphnetcontribute.htmlhttps://graphnet-team.github.io/graphnetindex.htmlhttps://graphnet-team.github.io/graphnetinstall.htmlhttps://graphnet-team.github.io/graphnetgenindex.htmlhttps://graphnet-team.github.io/graphnetpy-modindex.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/constants.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3extractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3featureextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3genericextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3hybridrecoextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3ntmuonlabelsextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3particleextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3pisaextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3quesoextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3retroextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3splinempeextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3truthextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/i3tumextractor.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/collections.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/frames.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/extractors/utilities/types.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/filters.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/parquet/parquet_dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/sqlite/sqlite_dataconverter.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/sqlite/sqlite_utilities.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/parquet_to_sqlite.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/random.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/data/utilities/string_selection_resolver.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/detector/detector.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/det
ector/icecube.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/detector/prometheus.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/models/model.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/pisa/fitting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/pisa/plotting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/callbacks.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/labels.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/loss_functions.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/training/weight_fitting.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/argparse.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/base_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/configurable.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/dataset_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/model_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/parsing.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/config/training_config.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/deprecation_tools.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/filesys.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/imports.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/logging.htmlhttps://graphnet-team.github.io/graphnet_modules/graphnet/utilities/maths.htmlhttps://graphnet-team.github.io/graphnet_modules/index.htmlhttps://graphnet-team.github.io/graphnetsearch.html
\ No newline at end of file