# %%
"""Submit DaCapo training runs for the signed-distance task (mito, 4nm output)."""
from shared import (
    config_store,
    trainer_config,
    input_resolution,
    output_resolution,
    architecture_config,
    repetitions,
    iterations,
    validation_interval,
    run_command,
    DataSplitGenerator,
    RunConfig,
    Run,
    subprocess,
)
from dacapo.experiments.tasks import DistanceTaskConfig

# Build the train/validate datasplit from the CSV manifest (8nm input -> 4nm output).
datasplit_config = DataSplitGenerator.generate_from_csv(
    "/nrs/cellmap/rhoadesj/dacapo/experiments/hot_distance/distance_datasplit.csv",
    input_resolution,
    output_resolution,
).compute()

datasplit = datasplit_config.datasplit_type(datasplit_config)
# Interactive sanity check of the datasplit (for the "# %%" notebook-cell workflow).
viewer = datasplit._neuroglancer()
config_store.store_datasplit_config(datasplit_config)

task_type = "distance"

# Signed-distance prediction for mitochondria. Distances are clipped/tolerated
# at 40.0 and scaled by 80.0 — presumably to normalize targets; TODO(review):
# confirm scale_factor semantics against DistanceTaskConfig documentation.
task_config = DistanceTaskConfig(
    name="distance_task_4nm",
    channels=["mito"],
    clip_distance=40.0,
    tol_distance=40.0,
    scale_factor=80.0,
)
config_store.store_task_config(task_config)


# %%
# One run per repetition; each run config is stored, then submitted to the cluster.
for i in range(repetitions):
    run_name = (
        "_".join(
            [
                "distance",
                datasplit_config.name,
                task_config.name,
                architecture_config.name,
                trainer_config.name,
            ]
        )
        + f"__{i}"
    )
    run_config = RunConfig(
        name=run_name,
        datasplit_config=datasplit_config,
        task_config=task_config,
        architecture_config=architecture_config,
        trainer_config=trainer_config,
        num_iterations=iterations,
        validation_interval=validation_interval,
        repetition=i,
        start_config=None,
    )

    print(run_config.name)
    config_store.store_run_config(run_config)
    run = Run(config_store.retrieve_run_config(run_name))

    # Submit the run to LSF. run_command is a full shell command line (it relies
    # on quoting and $(date ...) substitution), so it must go through a shell:
    # without shell=True, subprocess.run treats the whole string as a single
    # executable name and raises FileNotFoundError. bsub returns immediately
    # after queueing, so the actual training runs in parallel on the cluster.
    subprocess.run(
        run_command.format(run_name=run_name, task_type=task_type), shell=True
    )
# %%
"""Submit DaCapo training runs for the hot-distance task (mito, 4nm output)."""
from shared import (
    config_store,
    trainer_config,
    input_resolution,
    output_resolution,
    architecture_config,
    repetitions,
    iterations,
    validation_interval,
    run_command,
    DataSplitGenerator,
    RunConfig,
    Run,
    subprocess,
)
from dacapo.experiments.tasks import HotDistanceTaskConfig

# Build the train/validate datasplit from the CSV manifest (8nm input -> 4nm output).
datasplit_config = DataSplitGenerator.generate_from_csv(
    "/nrs/cellmap/rhoadesj/dacapo/experiments/hot_distance/hot_distance_datasplit.csv",
    input_resolution,
    output_resolution,
).compute()

datasplit = datasplit_config.datasplit_type(datasplit_config)
# Interactive sanity check of the datasplit (for the "# %%" notebook-cell workflow).
viewer = datasplit._neuroglancer()
config_store.store_datasplit_config(datasplit_config)

task_type = "hotdistance"

# %%

# Hot-distance (one-hot + distance) prediction for mitochondria; same distance
# parameters as the plain distance experiment so results are comparable.
task_config = HotDistanceTaskConfig(
    name="hotdistance_task_4nm",
    channels=["mito"],
    clip_distance=40.0,
    tol_distance=40.0,
    scale_factor=80.0,
)
config_store.store_task_config(task_config)


# %%
# One run per repetition; each run config is stored, then submitted to the cluster.
for i in range(repetitions):
    run_name = (
        "_".join(
            [
                "hotdistance",
                datasplit_config.name,
                task_config.name,
                architecture_config.name,
                trainer_config.name,
            ]
        )
        + f"__{i}"
    )
    run_config = RunConfig(
        name=run_name,
        datasplit_config=datasplit_config,
        task_config=task_config,
        architecture_config=architecture_config,
        trainer_config=trainer_config,
        num_iterations=iterations,
        validation_interval=validation_interval,
        repetition=i,
        start_config=None,
    )

    print(run_config.name)
    config_store.store_run_config(run_config)
    run = Run(config_store.retrieve_run_config(run_name))

    # Submit the run to LSF. run_command is a full shell command line (it relies
    # on quoting and $(date ...) substitution), so it must go through a shell:
    # without shell=True, subprocess.run treats the whole string as a single
    # executable name and raises FileNotFoundError. bsub returns immediately
    # after queueing, so the actual training runs in parallel on the cluster.
    subprocess.run(
        run_command.format(run_name=run_name, task_type=task_type), shell=True
    )
# %%
"""Submit DaCapo training runs for the one-hot classification task (mito, 4nm)."""
from shared import (
    config_store,
    trainer_config,
    input_resolution,
    output_resolution,
    architecture_config,
    repetitions,
    iterations,
    validation_interval,
    run_command,
    DataSplitGenerator,
    RunConfig,
    Run,
    subprocess,
)
from dacapo.experiments.tasks import OneHotTaskConfig

# Build the train/validate datasplit from the CSV manifest (8nm input -> 4nm output).
# NOTE(review): this reuses hot_distance_datasplit.csv rather than a dedicated
# one-hot manifest — presumably intentional (same data, different task); confirm.
datasplit_config = DataSplitGenerator.generate_from_csv(
    "/nrs/cellmap/rhoadesj/dacapo/experiments/hot_distance/hot_distance_datasplit.csv",
    input_resolution,
    output_resolution,
).compute()

datasplit = datasplit_config.datasplit_type(datasplit_config)
# Interactive sanity check of the datasplit (for the "# %%" notebook-cell workflow).
viewer = datasplit._neuroglancer()
config_store.store_datasplit_config(datasplit_config)

task_type = "onehot"

# One-hot semantic classification of mitochondria voxels.
task_config = OneHotTaskConfig(name="onehot_task_4nm", classes=["mito"])
config_store.store_task_config(task_config)


# %%
# One run per repetition; each run config is stored, then submitted to the cluster.
for i in range(repetitions):
    run_name = (
        "_".join(
            [
                "onehot",
                datasplit_config.name,
                task_config.name,
                architecture_config.name,
                trainer_config.name,
            ]
        )
        + f"__{i}"
    )
    run_config = RunConfig(
        name=run_name,
        datasplit_config=datasplit_config,
        task_config=task_config,
        architecture_config=architecture_config,
        trainer_config=trainer_config,
        num_iterations=iterations,
        validation_interval=validation_interval,
        repetition=i,
        start_config=None,
    )

    print(run_config.name)
    config_store.store_run_config(run_config)
    run = Run(config_store.retrieve_run_config(run_name))

    # Submit the run to LSF. run_command is a full shell command line (it relies
    # on quoting and $(date ...) substitution), so it must go through a shell:
    # without shell=True, subprocess.run treats the whole string as a single
    # executable name and raises FileNotFoundError. bsub returns immediately
    # after queueing, so the actual training runs in parallel on the cluster.
    subprocess.run(
        run_command.format(run_name=run_name, task_type=task_type), shell=True
    )
# %%
"""Shared experiment setup for the hot_distance experiments.

Creates the config store and defines the architecture, trainer, schedule
constants, and cluster-submission command used by distance.py,
hot_distance.py, and one_hot.py.
"""
from dacapo.store.create_store import create_config_store
from dacapo.experiments.datasplits import DataSplitGenerator
from funlib.geometry import Coordinate
from dacapo.experiments.architectures import CNNectomeUNetConfig
from dacapo.experiments.trainers import GunpowderTrainerConfig
from dacapo.experiments.trainers.gp_augments import (
    ElasticAugmentConfig,
    GammaAugmentConfig,
    IntensityAugmentConfig,
    IntensityScaleShiftAugmentConfig,
)
from dacapo.experiments import RunConfig
from dacapo.experiments.run import Run
import subprocess

config_store = create_config_store()

# Voxel resolutions: raw data is read at 8nm isotropic, predictions at 4nm.
input_resolution = Coordinate(8, 8, 8)
output_resolution = Coordinate(4, 4, 4)

# U-Net with three downsampling stages plus a trailing 2x upsample stage
# (hence the name "upsample_unet"), single-channel input, 72 output fmaps.
architecture_config = CNNectomeUNetConfig(
    name="upsample_unet",
    input_shape=Coordinate(216, 216, 216),
    eval_shape_increase=Coordinate(72, 72, 72),
    fmaps_in=1,
    num_fmaps=12,
    fmaps_out=72,
    fmap_inc_factor=6,
    downsample_factors=[(2, 2, 2), (3, 3, 3), (3, 3, 3)],
    constant_upsample=True,
    upsample_factors=[(2, 2, 2)],
)
config_store.store_architecture_config(architecture_config)

# Gunpowder augmentations, named individually for readability.
_elastic_augment = ElasticAugmentConfig(
    control_point_spacing=[100, 100, 100],
    control_point_displacement_sigma=[10.0, 10.0, 10.0],
    rotation_interval=(0.0, 1.5707963267948966),  # upper bound == pi/2
    subsample=8,
    uniform_3d_rotation=True,
)
_intensity_augment = IntensityAugmentConfig(
    scale=(0.25, 1.75), shift=(-0.5, 0.35), clip=True
)
_gamma_augment = GammaAugmentConfig(gamma_range=(0.5, 2.0))
# Affine remap x -> 2*x - 1 (e.g. maps [0, 1] intensities to [-1, 1]).
_scale_shift_augment = IntensityScaleShiftAugmentConfig(scale=2.0, shift=-1.0)

trainer_config = GunpowderTrainerConfig(
    name="trainer",
    batch_size=4,
    learning_rate=0.0001,
    num_data_fetchers=20,
    augments=[
        _elastic_augment,
        _intensity_augment,
        _gamma_augment,
        _scale_shift_augment,
    ],
    snapshot_interval=10000,
    min_masked=0.05,
    clip_raw=False,
)
config_store.store_trainer_config(trainer_config)

# Training schedule shared by every experiment script.
iterations = 300000
validation_interval = 10000
repetitions = 3

# LSF submission template; callers fill in {task_type} and {run_name}.
# NOTE: this is a full shell command line (quoting, $(date ...) substitution),
# so callers must execute it through a shell.
run_command = (
    'bsub -n 8 -gpu "num=4" -q gpu_tesla'
    ' -e "train_{task_type}_$(date +%Y%m%d%H%M%S).err"'
    ' -o "train_{task_type}_$(date +%Y%m%d%H%M%S).out"'
    " python dacapo train {run_name}"
)
#!/usr/bin/env bash
# Launch all three experiment-submission scripts concurrently.
# Each script stores its configs and bsub-submits its training runs.
python distance.py &
python one_hot.py &
python hot_distance.py &
# Wait for all background submission scripts to finish before exiting,
# so their output and exit are not orphaned (original also lacked a
# trailing newline, which some shells mishandle on the last command).
wait