Skip to content

Commit

Permalink
add test for 2D CLI (#128)
Browse files Browse the repository at this point in the history
Change default filetype to ome.zarr and bump version

---------

Co-authored-by: Martin Schorb <[email protected]>
Co-authored-by: Constantin Pape <[email protected]>
  • Loading branch information
3 people authored Feb 14, 2024
1 parent 57f6ae2 commit b4a7de6
Show file tree
Hide file tree
Showing 19 changed files with 121 additions and 44 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,3 +5,4 @@ tmp*/
*.n5
*.h5
.idea/
test/test-folder/
2 changes: 1 addition & 1 deletion mobie/__version__.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
__version__ = "0.4.5"
__version__ = "0.4.6"
SPEC_VERSION = "0.3.0"
4 changes: 2 additions & 2 deletions mobie/htm/data_import.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def _add_sources(dataset_folder, source_names, paths,
def add_images(files, root,
dataset_name, image_names,
resolution, scale_factors, chunks,
key=None, file_format="bdv.n5",
key=None, file_format="ome.zarr",
tmp_folder=None, target="local", max_jobs=multiprocessing.cpu_count(),
unit="micrometer", is_default_dataset=False, is2d=None):
assert len(files) == len(image_names), f"{len(files)}, {len(image_names)}"
Expand All @@ -114,7 +114,7 @@ def add_images(files, root,
def add_segmentations(files, root,
dataset_name, segmentation_names,
resolution, scale_factors, chunks,
key=None, file_format="bdv.n5",
key=None, file_format="ome.zarr",
tmp_folder=None, target="local", max_jobs=multiprocessing.cpu_count(),
add_default_tables=True, unit="micrometer",
is_default_dataset=False, is2d=None):
Expand Down
6 changes: 3 additions & 3 deletions mobie/image_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ def add_bdv_image(xml_path, root, dataset_name,
def add_image(input_path, input_key,
root, dataset_name, image_name,
resolution, scale_factors, chunks,
file_format="bdv.n5", menu_name=None,
file_format="ome.zarr", menu_name=None,
tmp_folder=None, target="local",
max_jobs=multiprocessing.cpu_count(),
view=None, transformation=None,
Expand All @@ -185,7 +185,7 @@ def add_image(input_path, input_key,
chunks [list[int]] - chunks for the data.
menu_name [str] - menu name for this source.
If none is given will be created based on the image name. (default: None)
file_format [str] - the file format used to store the data internally (default: bdv.n5)
file_format [str] - the file format used to store the data internally (default: ome.zarr)
tmp_folder [str] - folder for temporary files (default: None)
target [str] - computation target (default: "local")
max_jobs [int] - number of jobs (default: number of cores)
Expand Down Expand Up @@ -234,7 +234,7 @@ def add_image(input_path, input_key,

if move_only:
if int_to_uint:
raise ValueError("Conversio of integer to unsigned integer is not possible with move_only")
raise ValueError("Conversion of integer to unsigned integer is not possible with move_only")
shutil.move(input_path, data_path)
if "bdv." in file_format:
shutil.move(os.path.splitext(input_path)[0]+".xml", image_metadata_path)
Expand Down
4 changes: 2 additions & 2 deletions mobie/import_data/from_node_labels.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def import_segmentation_from_node_labels(in_path, in_key, out_path,
resolution, scale_factors, chunks,
tmp_folder, target, max_jobs,
block_shape=None, unit="micrometer",
source_name=None, file_format="bdv.n5"):
source_name=None, file_format="ome.zarr"):
""" Import segmentation data into mobie format from a paintera dataset
Arguments:
Expand All @@ -56,7 +56,7 @@ def import_segmentation_from_node_labels(in_path, in_key, out_path,
By default, same as chunks. (default:None)
unit [str] - physical unit of the coordinate system (default: micrometer)
source_name [str] - name of the source (default: None)
file_format [str] - the file format (default: "bdv.n5")
file_format [str] - the file format (default: "ome.zarr")
"""

out_key = get_scale_key(file_format)
Expand Down
4 changes: 2 additions & 2 deletions mobie/import_data/image.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ def import_image_data(in_path, in_key, out_path,
resolution, scale_factors, chunks,
tmp_folder=None, target="local", max_jobs=mp.cpu_count(),
block_shape=None, unit="micrometer",
source_name=None, file_format="bdv.n5",
source_name=None, file_format="ome.zarr",
int_to_uint=False, channel=None):
""" Import image data to mobie format.
Expand All @@ -24,7 +24,7 @@ def import_image_data(in_path, in_key, out_path,
By default, same as chunks. (default:None)
unit [str] - physical unit of the coordinate system (default: micrometer)
source_name [str] - name of the source (default: None)
file_format [str] - the file format (default: "bdv.n5")
file_format [str] - the file format (default: "ome.zarr")
int_to_uint [bool] - whether to convert signed to unsigned integer (default: False)
channel [int] - the channel to load from the data.
Currently only supported for the ome.zarr format (default: None)
Expand Down
4 changes: 2 additions & 2 deletions mobie/import_data/segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ def import_segmentation(in_path, in_key, out_path,
tmp_folder, target, max_jobs,
block_shape=None, with_max_id=True,
unit="micrometer", source_name=None,
file_format="bdv.n5"):
file_format="ome.zarr"):
""" Import segmentation data into mobie format.
Arguments:
Expand All @@ -24,7 +24,7 @@ def import_segmentation(in_path, in_key, out_path,
with_max_id [bool] - whether to add the max id attribute
unit [str] - physical unit of the coordinate system (default: micrometer)
source_name [str] - name of the source (default: None)
file_format [str] - the file format (default: "bdv.n5")
file_format [str] - the file format (default: "ome.zarr")
"""
# we allow 2d data for ome.zarr file format
if file_format != "ome.zarr":
Expand Down
22 changes: 18 additions & 4 deletions mobie/import_data/traces.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,16 @@
from tqdm import tqdm


def is_ome_zarr(path):
    """Return True if *path* names an OME-Zarr container, judged purely by its suffix.

    Note: this is a string check only — the path is not opened or validated.
    """
    ome_zarr_suffix = "ome.zarr"
    return path.endswith(ome_zarr_suffix)


def get_key_ome_zarr(path):
    """Return the internal dataset key of the first (highest-resolution) scale level.

    Reads the "multiscales" attribute from the OME-Zarr container at *path*
    and returns the "path" entry of the first dataset of the first multiscale.
    """
    with open_file(path, "r") as container:
        multiscales = container.attrs["multiscales"]
        return multiscales[0]["datasets"][0]["path"]


def coords_to_vol(coords, nid, radius=5):
bb_min = coords.min(axis=0)
bb_max = coords.max(axis=0) + 1
Expand Down Expand Up @@ -162,14 +172,18 @@ def import_traces(input_folder, out_path,
traces = parse_traces(input_folder)

# check that we are compatible with bdv (ids need to be smaller than int16 max)
max_id = np.iinfo('int16').max
max_id = np.iinfo("int16").max
max_trace_id = max(traces.keys())
if max_trace_id > max_id:
raise RuntimeError("Can't export id %i > %i" % (max_trace_id, max_id))

is_h5 = is_h5py(reference_path)
ref_key = get_key(is_h5, timepoint=0, setup_id=0, scale=reference_scale)
with open_file(reference_path, 'r') as f:
if is_ome_zarr(reference_path):
ref_key = get_key_ome_zarr(reference_path)
else:
is_h5 = is_h5py(reference_path)
ref_key = get_key(is_h5, timepoint=0, setup_id=0, scale=reference_scale)

with open_file(reference_path, "r") as f:
ds = f[ref_key]
shape = ds.shape
if chunks is None:
Expand Down
2 changes: 1 addition & 1 deletion mobie/import_data/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ def downscale(in_path, in_key, out_path,
resolution, scale_factors, chunks,
tmp_folder, target, max_jobs, block_shape,
library="vigra", library_kwargs=None,
metadata_format="bdv.n5", out_key="",
metadata_format="ome.zarr", out_key="",
unit="micrometer", source_name=None,
roi_begin=None, roi_end=None,
int_to_uint=False, channel=None):
Expand Down
4 changes: 2 additions & 2 deletions mobie/registration.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
def add_registered_source(input_path, input_key, transformation,
root, dataset_name, source_name,
resolution, scale_factors, chunks, method,
menu_name=None, file_format="bdv.n5",
menu_name=None, file_format="ome.zarr",
shape=None, source_type='image',
view=None, add_default_table=True,
fiji_executable=None, elastix_directory=None,
Expand Down Expand Up @@ -46,7 +46,7 @@ def add_registered_source(input_path, input_key, transformation,
'transformix': apply transformation using transformix
menu_name [str] - menu name for this source.
If none is given will be created based on the image name. (default: None)
file_format [str] - the file format used to store the data internally (default: bdv.n5)
file_format [str] - the file format used to store the data internally (default: ome.zarr)
shape [tuple[int]] - shape of the output volume. If None, the shape specified in
the elastix transformation file will be used. (default: None)
source_type [str] - type of the data, can be either 'image', 'segmentation' or 'mask'
Expand Down
2 changes: 1 addition & 1 deletion mobie/segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
def add_segmentation(input_path, input_key,
root, dataset_name, segmentation_name,
resolution, scale_factors, chunks,
menu_name=None, file_format="bdv.n5",
menu_name=None, file_format="ome.zarr",
node_label_path=None, node_label_key=None,
tmp_folder=None, target="local",
max_jobs=multiprocessing.cpu_count(),
Expand Down
2 changes: 1 addition & 1 deletion mobie/traces.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def add_traces(input_folder, root, dataset_name, traces_name,
scale_factors [list[list[int]]] - scale factors used for down-sampling.
menu_name [str] - menu item for this source.
If none is given will be created based on the image name. (default: None)
file_format [str] - the file format used to store the data internally (default: bdv.n5)
file_format [str] - the file format used to store the data internally (default: ome.zarr)
view [dict] - default view settings for this source (default: None)
chunks [list[int]] - chunks for the data.
max_jobs [int] - number of jobs (default: number of cores)
Expand Down
28 changes: 22 additions & 6 deletions test/import_data/test_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
class TestImportImage(unittest.TestCase):
test_folder = "./test-folder"
tmp_folder = "./test-folder/tmp"
out_path = "./test-folder/imported-data.n5"
out_path = "./test-folder/imported-data.ome.zarr"
n_jobs = min(4, cpu_count())

def setUp(self):
Expand Down Expand Up @@ -51,6 +51,7 @@ def check_data(self, exp_data, scales, is_h5=False, out_path=None):
self._check_data(exp_data, scale_data, scales)

def check_data_ome_zarr(self, exp_data, scales, out_path, resolution, scale_factors):
out_path = self.out_path if out_path is None else out_path
scale_data = []
with open_file(out_path, "r") as f:

Expand Down Expand Up @@ -96,7 +97,7 @@ def create_h5_input_data(self, shape=3*(64,)):
return test_path, key, data

#
# test imports from different file formats (to default output format = bdv.n5)
# test imports from different file formats (to default output format = ome.zarr)
#

def test_import_tif(self):
Expand All @@ -106,27 +107,31 @@ def test_import_tif(self):

im_folder = os.path.join(self.test_folder, "im-stack")
os.makedirs(im_folder, exist_ok=True)

resolution=(0.25, 1, 1)

for z in range(shape[0]):
path = os.path.join(im_folder, "z_%03i.tif" % z)
imageio.imsave(path, data[z])

scales = [[1, 2, 2], [1, 2, 2], [2, 2, 2]]
import_image_data(im_folder, "*.tif", self.out_path,
resolution=(0.25, 1, 1), chunks=(16, 64, 64),
resolution=resolution, chunks=(16, 64, 64),
scale_factors=scales, tmp_folder=self.tmp_folder,
target="local", max_jobs=self.n_jobs)

self.check_data(data, scales)
self.check_data_ome_zarr(data, scales, self.out_path, resolution, scales)

def test_import_hdf5(self):
from mobie.import_data import import_image_data
test_path, key, data = self.create_h5_input_data()
scales = [[2, 2, 2], [2, 2, 2], [2, 2, 2]]
resolution=(1, 1, 1)
import_image_data(test_path, key, self.out_path,
resolution=(1, 1, 1), chunks=(32, 32, 32),
resolution=resolution, chunks=(32, 32, 32),
scale_factors=scales, tmp_folder=self.tmp_folder,
target="local", max_jobs=self.n_jobs)
self.check_data(data, scales)
self.check_data_ome_zarr(data, scales, self.out_path, resolution, scales)

# TODO
@unittest.skipIf(mrcfile is None, "Need mrcfile")
Expand All @@ -148,6 +153,17 @@ def test_import_bdv_hdf5(self):
target="local", max_jobs=1, file_format="bdv.hdf5")
self.check_data(data, scales, is_h5=True, out_path=out_path)

def test_import_bdv_n5(self):
from mobie.import_data import import_image_data
test_path, key, data = self.create_h5_input_data()
scales = [[2, 2, 2], [2, 2, 2], [2, 2, 2]]
out_path = os.path.join(self.test_folder, "imported_data.n5")
import_image_data(test_path, key, out_path,
resolution=(1, 1, 1), chunks=(32, 32, 32),
scale_factors=scales, tmp_folder=self.tmp_folder,
target="local", max_jobs=1, file_format="bdv.n5")
self.check_data(data, scales, is_h5=False, out_path=out_path)

def test_import_ome_zarr(self):
from mobie.import_data import import_image_data
test_path, key, data = self.create_h5_input_data()
Expand Down
7 changes: 3 additions & 4 deletions test/import_data/test_segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,13 @@

import numpy as np
from elf.io import open_file
from pybdv.util import get_key
from pybdv.downsample import sample_shape


class TestImportSegmentation(unittest.TestCase):
test_folder = './test-folder'
tmp_folder = './test-folder/tmp'
out_path = './test-folder/imported-data.n5'
out_path = './test-folder/imported-data.ome.zarr'
n_jobs = multiprocessing.cpu_count()

def setUp(self):
Expand All @@ -22,7 +21,7 @@ def tearDown(self):
rmtree(self.test_folder)

def check_seg(self, exp_data, scales):
key = get_key(False, 0, 0, 0)
key = "s0"
with open_file(self.out_path, 'r') as f:
ds = f[key]
data = ds[:]
Expand All @@ -33,7 +32,7 @@ def check_seg(self, exp_data, scales):

exp_shape = data.shape
for scale, scale_facor in enumerate(scales, 1):
key = get_key(False, 0, 0, scale)
key = f"s{scale}"
with open_file(self.out_path, 'r') as f:
self.assertIn(key, f)
this_shape = f[key].shape
Expand Down
Loading

0 comments on commit b4a7de6

Please sign in to comment.