Merge pull request #378 from asogaard/restructure-examples
Restructure examples
asogaard authored Jan 20, 2023
2 parents 97c9d51 + da9c22c commit b2bad25
Showing 69 changed files with 1,461 additions and 1,148 deletions.
4 changes: 3 additions & 1 deletion .gitignore
@@ -142,10 +142,12 @@ wandb/
 checkpoints/
 
 # Data and outputs
-test_data/output/
+data/tests/output/
+data/examples/output/
 **.png
 **.pth
 **.db
+**.parquet
 
 # Notebooks
 **.ipynb
@@ -1,4 +1,4 @@
-path: /groups/icecube/asogaard/data/sqlite/dev_lvl7_robustness_muon_neutrino_0000/data/dev_lvl7_robustness_muon_neutrino_0000.db
+path: /groups/icecube/asogaard/data/example/dev_lvl7_robustness_muon_neutrino_0000.db
 pulsemaps:
 - SRTTWOfflinePulsesDC
 features:
26 changes: 26 additions & 0 deletions configs/datasets/test_data_sqlite.yml
@@ -0,0 +1,26 @@
path: /groups/icecube/asogaard/work/development/graphnet/data/tests/sqlite/oscNext_genie_level7_v02/oscNext_genie_level7_v02_first_5_frames.db
pulsemaps:
- SRTInIcePulses
features:
- dom_x
- dom_y
- dom_z
- dom_time
- charge
- rde
- pmt_area
truth:
- energy
- position_x
- position_y
- position_z
- azimuth
- zenith
- pid
- elasticity
- sim_type
- interaction_type
index_column: event_no
truth_table: truth
seed: 21
selection: null
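
For context, graphnet consumes a dataset config like the one above by rebuilding the corresponding `Dataset` straight from the YAML. A minimal sketch, assuming the `DatasetConfig.load` / `Dataset.from_config` API of this era:

```python
# Minimal sketch: build a Dataset from the declarative config above
# (assumes graphnet's `DatasetConfig.load` / `Dataset.from_config` API).
from graphnet.data.dataset import Dataset
from graphnet.utilities.config import DatasetConfig

config = DatasetConfig.load("configs/datasets/test_data_sqlite.yml")
dataset = Dataset.from_config(config)  # here: backed by the SQLite test file

print(len(dataset))  # number of selected events
```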
42 changes: 42 additions & 0 deletions configs/datasets/training_example_data_parquet.yml
@@ -0,0 +1,42 @@
path: /groups/icecube/asogaard/work/development/graphnet/data/examples/parquet/prometheus/prometheus-events.parquet
pulsemaps:
- total
features:
- sensor_pos_x
- sensor_pos_y
- sensor_pos_z
- t
truth:
- injection_energy
- injection_type
- injection_interaction_type
- injection_zenith
- injection_azimuth
- injection_bjorkenx
- injection_bjorkeny
- injection_position_x
- injection_position_y
- injection_position_z
- injection_column_depth
- primary_lepton_1_type
- primary_hadron_1_type
- primary_lepton_1_position_x
- primary_lepton_1_position_y
- primary_lepton_1_position_z
- primary_hadron_1_position_x
- primary_hadron_1_position_y
- primary_hadron_1_position_z
- primary_lepton_1_direction_theta
- primary_lepton_1_direction_phi
- primary_hadron_1_direction_theta
- primary_hadron_1_direction_phi
- primary_lepton_1_energy
- primary_hadron_1_energy
- total_energy
index_column: event_no
truth_table: mc_truth
seed: 21
selection:
  test: event_no % 5 == 0
  validation: event_no % 5 == 1
  train: event_no % 5 > 1
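
The `selection` block above encodes a deterministic split on the event index: events with `event_no % 5 == 0` form the test set (20%), `== 1` the validation set (20%), and the remaining residues the training set (60%). The partition logic, illustrated in plain Python:

```python
# The deterministic 5-way modulo split encoded by `selection`
# (plain Python illustration, independent of graphnet).
event_numbers = range(20)

test = [n for n in event_numbers if n % 5 == 0]        # 20% of events
validation = [n for n in event_numbers if n % 5 == 1]  # 20% of events
train = [n for n in event_numbers if n % 5 > 1]        # remaining 60%

# Every event lands in exactly one split.
assert set(test) | set(validation) | set(train) == set(event_numbers)
```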
42 changes: 42 additions & 0 deletions configs/datasets/training_example_data_sqlite.yml
@@ -0,0 +1,42 @@
path: /groups/icecube/asogaard/work/development/graphnet/data/examples/sqlite/prometheus/prometheus-events.db
pulsemaps:
- total
features:
- sensor_pos_x
- sensor_pos_y
- sensor_pos_z
- t
truth:
- injection_energy
- injection_type
- injection_interaction_type
- injection_zenith
- injection_azimuth
- injection_bjorkenx
- injection_bjorkeny
- injection_position_x
- injection_position_y
- injection_position_z
- injection_column_depth
- primary_lepton_1_type
- primary_hadron_1_type
- primary_lepton_1_position_x
- primary_lepton_1_position_y
- primary_lepton_1_position_z
- primary_hadron_1_position_x
- primary_hadron_1_position_y
- primary_hadron_1_position_z
- primary_lepton_1_direction_theta
- primary_lepton_1_direction_phi
- primary_hadron_1_direction_theta
- primary_hadron_1_direction_phi
- primary_lepton_1_energy
- primary_hadron_1_energy
- total_energy
index_column: event_no
truth_table: mc_truth
seed: 21
selection:
  test: event_no % 5 == 0
  validation: event_no % 5 == 1
  train: event_no % 5 > 1
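
This SQLite config mirrors the Parquet one above apart from the backing file. With a named `selection` like this, `Dataset.from_config` is expected to yield one dataset per split — a hedged sketch, since the exact return type depends on the graphnet version:

```python
# Sketch: one Dataset per named selection split (assumed behaviour of
# `Dataset.from_config` for multi-selection configs in graphnet of this era).
from graphnet.data.dataset import Dataset
from graphnet.utilities.config import DatasetConfig

config = DatasetConfig.load("configs/datasets/training_example_data_sqlite.yml")
datasets = Dataset.from_config(config)

train_dataset = datasets["train"]
validation_dataset = datasets["validation"]
test_dataset = datasets["test"]
```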
58 changes: 58 additions & 0 deletions configs/models/example_direction_reconstruction_model.yml
@@ -0,0 +1,58 @@
arguments:
  coarsening: null
  detector:
    ModelConfig:
      arguments:
        graph_builder:
          ModelConfig:
            arguments: {columns: null, nb_nearest_neighbours: 8}
            class_name: KNNGraphBuilder
        scalers: null
      class_name: Prometheus
  gnn:
    ModelConfig:
      arguments:
        add_global_variables_after_pooling: false
        dynedge_layer_sizes: null
        features_subset: null
        global_pooling_schemes: [min, max, mean, sum]
        nb_inputs: 4
        nb_neighbours: 8
        post_processing_layer_sizes: null
        readout_layer_sizes: null
      class_name: DynEdge
  optimizer_class: '!class torch.optim.adam Adam'
  optimizer_kwargs: {eps: 0.001, lr: 1e-05}
  scheduler_class: '!class torch.optim.lr_scheduler ReduceLROnPlateau'
  scheduler_config: {frequency: 1, monitor: val_loss}
  scheduler_kwargs: {patience: 5}
  tasks:
  - ModelConfig:
      arguments:
        hidden_size: 128
        loss_function:
          ModelConfig:
            arguments: {}
            class_name: VonMisesFisher2DLoss
        loss_weight: null
        target_labels: injection_zenith
        transform_inference: null
        transform_prediction_and_target: null
        transform_support: null
        transform_target: null
      class_name: ZenithReconstructionWithKappa
  - ModelConfig:
      arguments:
        hidden_size: 128
        loss_function:
          ModelConfig:
            arguments: {}
            class_name: VonMisesFisher2DLoss
        loss_weight: null
        target_labels: injection_azimuth
        transform_inference: null
        transform_prediction_and_target: null
        transform_support: null
        transform_target: null
      class_name: AzimuthReconstructionWithKappa
class_name: StandardModel
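
A model config like the one above round-trips through `ModelConfig`. Because the `!class` and `!lambda` tags deserialise into live Python objects, loading requires an explicit opt-in; a sketch assuming the `trust=True` flag from graphnet's documentation of this era:

```python
# Sketch: rebuild the two-task direction model from its YAML config.
# `trust=True` is needed because `!class`/`!lambda` tags execute code
# during deserialisation (assumed graphnet API of this era).
from graphnet.models import Model
from graphnet.utilities.config import ModelConfig

config = ModelConfig.load(
    "configs/models/example_direction_reconstruction_model.yml"
)
model = Model.from_config(config, trust=True)
```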
44 changes: 44 additions & 0 deletions configs/models/example_energy_reconstruction_model.yml
@@ -0,0 +1,44 @@
arguments:
  coarsening: null
  detector:
    ModelConfig:
      arguments:
        graph_builder:
          ModelConfig:
            arguments: {columns: null, nb_nearest_neighbours: 8}
            class_name: KNNGraphBuilder
        scalers: null
      class_name: Prometheus
  gnn:
    ModelConfig:
      arguments:
        add_global_variables_after_pooling: false
        dynedge_layer_sizes: null
        features_subset: null
        global_pooling_schemes: [min, max, mean, sum]
        nb_inputs: 4
        nb_neighbours: 8
        post_processing_layer_sizes: null
        readout_layer_sizes: null
      class_name: DynEdge
  optimizer_class: '!class torch.optim.adam Adam'
  optimizer_kwargs: {eps: 0.001, lr: 1e-05}
  scheduler_class: '!class torch.optim.lr_scheduler ReduceLROnPlateau'
  scheduler_config: {frequency: 1, monitor: val_loss}
  scheduler_kwargs: {patience: 5}
  tasks:
  - ModelConfig:
      arguments:
        hidden_size: 128
        loss_function:
          ModelConfig:
            arguments: {}
            class_name: LogCoshLoss
        loss_weight: null
        target_labels: total_energy
        transform_inference: null
        transform_prediction_and_target: '!lambda x: torch.log10(x)'
        transform_support: null
        transform_target: null
      class_name: EnergyReconstruction
class_name: StandardModel
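
Relative to the direction model, the substantive change is the task: scalar energy regression with `LogCoshLoss`, trained in log10-space via `transform_prediction_and_target`. The motivation for the log-transform, in a standalone snippet: neutrino energies span several orders of magnitude, and in log-space a given relative error contributes equally to the loss regardless of scale.

```python
# Why `transform_prediction_and_target: '!lambda x: torch.log10(x)'`:
# a uniform 10% relative error yields the same log-space residual at
# every energy scale, so no single decade dominates the loss.
import torch

energy_true = torch.tensor([1e2, 1e4, 1e6])  # spans four decades
energy_pred = energy_true * 1.1              # uniform 10% relative error

print(torch.log10(energy_pred) - torch.log10(energy_true))
# tensor([0.0414, 0.0414, 0.0414])
```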
46 changes: 46 additions & 0 deletions configs/models/example_vertex_position_reconstruction_model.yml
@@ -0,0 +1,46 @@
arguments:
  coarsening: null
  detector:
    ModelConfig:
      arguments:
        graph_builder:
          ModelConfig:
            arguments: {columns: null, nb_nearest_neighbours: 8}
            class_name: KNNGraphBuilder
        scalers: null
      class_name: Prometheus
  gnn:
    ModelConfig:
      arguments:
        add_global_variables_after_pooling: false
        dynedge_layer_sizes: null
        features_subset: null
        global_pooling_schemes: [min, max, mean, sum]
        nb_inputs: 4
        nb_neighbours: 8
        post_processing_layer_sizes: null
        readout_layer_sizes: null
      class_name: DynEdge
  optimizer_class: '!class torch.optim.adam Adam'
  optimizer_kwargs: {eps: 0.001, lr: 0.001}
  scheduler_class: '!class graphnet.training.callbacks PiecewiseLinearLR'
  scheduler_config: {interval: step}
  scheduler_kwargs:
    factors: [0.01, 1, 0.01]
    milestones: [0, 33.0, 330]
  tasks:
  - ModelConfig:
      arguments:
        hidden_size: 128
        loss_function:
          ModelConfig:
            arguments: {}
            class_name: MSELoss
        nb_outputs: 3
        target_labels: [injection_position_x, injection_position_y, injection_position_z]
        transform_inference: "!function def unscale_XYZ(x):\n x[:,0] = 764.431509*x[:,0]\n x[:,1] =\
          \ 785.041607*x[:,1]\n x[:,2] = 1083.249944*x[:,2]\n return x\n"
        transform_target: "!function def scale_XYZ(x):\n x[:,0] = x[:,0]/764.431509\n x[:,1] =\
          \ x[:,1]/785.041607\n x[:,2] = x[:,2]/1083.249944\n return x\n"
      class_name: IdentityTask
class_name: StandardModel
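
Here the `!function` pair scales the target coordinates into roughly unit range for `MSELoss` and inverts the scaling at inference time; the two functions are exact inverses, as a quick round-trip check shows:

```python
# Round-trip check for the scale_XYZ / unscale_XYZ pair defined above:
# dividing by the per-axis constants and multiplying back is the identity.
import torch

scale = torch.tensor([764.431509, 785.041607, 1083.249944])
xyz = torch.tensor([[100.0, -250.0, 500.0]])  # arbitrary detector coordinates

assert torch.allclose((xyz / scale) * scale, xyz)
```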
Binary file not shown.
Binary file not shown.
@@ -3,8 +3,7 @@
 import logging
 import os
 
-from graphnet.utilities.logging import get_logger
-
+from graphnet.constants import EXAMPLE_OUTPUT_DIR, TEST_DATA_DIR
 from graphnet.data.extractors import (
     I3FeatureExtractorIceCubeUpgrade,
     I3RetroExtractor,
@@ -14,6 +13,9 @@
 from graphnet.data.dataconverter import DataConverter
 from graphnet.data.parquet import ParquetDataConverter
 from graphnet.data.sqlite import SQLiteDataConverter
+from graphnet.utilities.argparse import ArgumentParser
+from graphnet.utilities.imports import has_icecube_package
+from graphnet.utilities.logging import get_logger
 
 logger = get_logger(level=logging.INFO)
 
@@ -28,8 +30,8 @@ def main_icecube86(backend: str) -> None:
     # Check(s)
     assert backend in CONVERTER_CLASS
 
-    inputs = ["./test_data/"]
-    outdir = "./temp/test_ic86"
+    inputs = [f"{TEST_DATA_DIR}/i3/oscNext_genie_level7_v02"]
+    outdir = f"{EXAMPLE_OUTPUT_DIR}/convert_i3_files/ic86"
 
     converter: DataConverter = CONVERTER_CLASS[backend](
         [
@@ -53,8 +55,8 @@ def main_icecube_upgrade(backend: str) -> None:
     # Check(s)
     assert backend in CONVERTER_CLASS
 
-    inputs = ["test_data_upgrade_2"]
-    outdir = "./temp/test_upgrade"
+    inputs = [f"{TEST_DATA_DIR}/i3/upgrade_genie_step4_140028_000998"]
+    outdir = f"{EXAMPLE_OUTPUT_DIR}/convert_i3_files/ic86"
     workers = 1
 
     converter: DataConverter = CONVERTER_CLASS[backend](
@@ -81,7 +83,37 @@
 
 
 if __name__ == "__main__":
-    backend = "parquet"
-    # backend = "sqlite"
-    main_icecube86(backend)
-    # main_icecube_upgrade(backend)
+
+    if not has_icecube_package():
+        logger.error(
+            "This example requires IceTray to be installed, which doesn't "
+            "seem to be the case. Please install IceTray; run this example in "
+            "the GraphNeT Docker container which comes with IceTray "
+            "installed; or run an example script in one of the other folders:"
+            "\n * examples/02_data/"
+            "\n * examples/03_weights/"
+            "\n * examples/04_training/"
+            "\n * examples/05_pisa/"
+            "\nExiting."
+        )
+
+    else:
+        # Parse command-line arguments
+        parser = ArgumentParser(
+            description="""
+Convert I3 files to an intermediate format.
+"""
+        )
+
+        parser.add_argument("backend", choices=["sqlite", "parquet"])
+        parser.add_argument(
+            "detector", choices=["icecube-86", "icecube-upgrade"]
+        )
+
+        args = parser.parse_args()
+
+        # Run example script
+        if args.detector == "icecube-86":
+            main_icecube86(args.backend)
+        else:
+            main_icecube_upgrade(args.backend)
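
With this change the example no longer hard-codes a backend at the bottom of the file; both backend and detector are passed as positional command-line arguments instead, e.g. `<script> sqlite icecube-86` or `<script> parquet icecube-upgrade` (the script's file name is not shown in this render).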
