diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3fa4da1..60452e4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,17 +1,37 @@ -# Contributing +# Contributing to BrkRaw -When contributing to this repository, please first discuss the change you wish to make via issue, -email, or any other method with the owners of this repository before making a change. +Thank you for your interest in contributing to BrkRaw! Whether you're tackling a bug, adding a new feature, or improving our documentation, every contribution is appreciated. This guide will help you get started with your contributions in the most effective way. -Please note we have a code of conduct, please follow it in all your interactions with the project. +## Ways to Contribute -## Pull Request Process +### Reporting Issues -1. Ensure any install or build dependencies are removed before the end of the layer when doing a - build. -2. Update the README.md with details of changes to the interface, this includes new environment - variables, exposed ports, useful file locations and container parameters. -3. Increase the version numbers in any examples files and the README.md to the new version that this - Pull Request would represent. The versioning scheme we use is [SemVer](http://semver.org/). -4. You may merge the Pull Request in once you have the sign-off of two other developers, or if you - do not have permission to do that, you may request the second reviewer to merge it for you. \ No newline at end of file +If you encounter a bug, have a suggestion, or want to make a feature request, please use the Issues section. Include as much detail as possible and label your issue appropriately. + +### Pull Requests + +We welcome pull requests with open arms! Here’s how you can make one: + +- **Code Changes**: If you are updating the BrkRaw codebase, perhaps due to a ParaVision compatibility issue or to suggest a new standard, please make sure your changes are well-documented. 
+- **New Features**: If you're introducing a new feature, ensure that you include appropriate test scripts in the `tests` directory, following our standard testing workflow. Check our documentation for more details. +- **New Applications**: Contributions that significantly enhance community utility but cannot be integrated via the plugin architecture should be directed to the main BrkRaw package. + +Before creating a pull request, ensure that your code complies with the existing code style and that you have tested your changes locally. + +### Contributing to Child Repositories + +- **[plugin](../brkraw-plugin)**: For new functionalities at the app level, direct your contributions here. +- **[dataset](../brkraw-dataset)**: To add a new dataset that needs to be tested via BrkRaw CI for data conversion consistency and reliability, please contribute here. +- **[tutorial](../brkraw-tutorial)**: For new tutorials, tutorial revisions, or documentation that would help other users, please contribute to this repository. + +## Before You Start + +Please review the documentation and Q&A to see if your question has already been answered or if the feature has already been discussed. If you’re unsure about adding a feature or making a change, open an issue to discuss it first. + +## Contribution Guidelines + +- Ensure your contributions are clear and easy to understand. +- Include any necessary tests and documentation updates. +- Adhere to the coding standards and best practices as outlined in our project documentation. + +We look forward to your contributions and are excited to see what you come up with! 
\ No newline at end of file diff --git a/Makefile b/Makefile index 30cb7df..3a2516a 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ clean: rm -rf tests/tutorials tests/tutorials: - git clone https://github.com/BrkRaw/tutorials.git tests/tutorials + git clone https://github.com/BrkRaw/brkraw-tutorial.git tests/tutorials tests/tutorials/SampleData/20190724_114946_BRKRAW_1_1: tests/tutorials unzip -uq tests/tutorials/SampleData/20190724_114946_BRKRAW_1_1.zip -d tests/tutorials/SampleData/ diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index 5f9ce81..261d4a1 100755 --- a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -1,4 +1,4 @@ -from .brkobj import StudyObj +from .data import Study from ..config import ConfigManager -__all__ = [StudyObj, ConfigManager] \ No newline at end of file +__all__ = ['Study', 'ConfigManager'] \ No newline at end of file diff --git a/brkraw/api/analyzer/__init__.py b/brkraw/api/analyzer/__init__.py index 4fed832..dce03d3 100644 --- a/brkraw/api/analyzer/__init__.py +++ b/brkraw/api/analyzer/__init__.py @@ -3,4 +3,4 @@ from .affine import AffineAnalyzer from .dataarray import DataArrayAnalyzer -__all__ = [BaseAnalyzer, ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer] \ No newline at end of file +__all__ = ['BaseAnalyzer', 'ScanInfoAnalyzer', 'AffineAnalyzer', 'DataArrayAnalyzer'] \ No newline at end of file diff --git a/brkraw/api/analyzer/affine.py b/brkraw/api/analyzer/affine.py index 0138667..aa1a677 100644 --- a/brkraw/api/analyzer/affine.py +++ b/brkraw/api/analyzer/affine.py @@ -5,7 +5,7 @@ from copy import copy from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: - from ..brkobj.scan import ScanInfo + from ..data.scan import ScanInfo SLICEORIENT = { diff --git a/brkraw/api/analyzer/base.py b/brkraw/api/analyzer/base.py index 0198b9a..76fa42d 100644 --- a/brkraw/api/analyzer/base.py +++ b/brkraw/api/analyzer/base.py @@ -1,3 +1,3 @@ class BaseAnalyzer: - def vars(self): + def to_dict(self): return 
self.__dict__ \ No newline at end of file diff --git a/brkraw/api/analyzer/dataarray.py b/brkraw/api/analyzer/dataarray.py index 89a2a12..d435c89 100644 --- a/brkraw/api/analyzer/dataarray.py +++ b/brkraw/api/analyzer/dataarray.py @@ -4,7 +4,7 @@ from copy import copy from typing import TYPE_CHECKING, Union if TYPE_CHECKING: - from ..brkobj import ScanInfo + from ..data import ScanInfo from io import BufferedReader from zipfile import ZipExtFile @@ -33,3 +33,4 @@ def _calc_array_shape(self, infoobj: 'ScanInfo'): def get_dataarray(self): self.buffer.seek(0) return np.frombuffer(self.buffer.read(), self.dtype).reshape(self.shape, order='F') + diff --git a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py index 7d152fc..14cf84a 100644 --- a/brkraw/api/analyzer/scaninfo.py +++ b/brkraw/api/analyzer/scaninfo.py @@ -36,9 +36,15 @@ def _set_pars(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Option except AttributeError: vals = OrderedDict() setattr(self, p, vals) + try: + fid_buffer = pvobj.get_fid() + except (FileNotFoundError, AttributeError): + fid_buffer = None + setattr(self, 'fid_buffer', fid_buffer) + try: visu_pars = pvobj.get_visu_pars(reco_id) - except FileNotFoundError: + except (FileNotFoundError, AttributeError): visu_pars = OrderedDict() setattr(self, 'visu_pars', visu_pars) @@ -48,6 +54,7 @@ def _parse_info(self): self.info_image = helper.Image(self).get_info() self.info_slicepack = helper.SlicePack(self).get_info() self.info_cycle = helper.Cycle(self).get_info() + self.info_diffusion = helper.Diffusion(self).get_info() if self.info_image['dim'] > 1: self.info_orientation = helper.Orientation(self).get_info() diff --git a/brkraw/api/brkobj/__init__.py b/brkraw/api/brkobj/__init__.py deleted file mode 100644 index 464667c..0000000 --- a/brkraw/api/brkobj/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .study import StudyObj -from .scan import ScanObj, ScanInfo - -__all__ = [StudyObj, ScanObj, ScanInfo] \ No newline at end of 
file diff --git a/brkraw/api/brkobj/scan.py b/brkraw/api/brkobj/scan.py deleted file mode 100644 index b99372c..0000000 --- a/brkraw/api/brkobj/scan.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import annotations -from typing import Optional -import ctypes -from ..pvobj import PvScan -from ..analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer - - -class ScanInfo(BaseAnalyzer): - def __init__(self): - self.warns = [] - - @property - def num_warns(self): - return len(self.warns) - - -class ScanObj(PvScan): - def __init__(self, pvscan: 'PvScan', reco_id: Optional[int] = None, - loader_address: Optional[int] = None, debug: bool=False): - super().__init__(pvscan._scan_id, - (pvscan._rootpath, pvscan._path), - pvscan._contents, - pvscan._recos) - - self.reco_id = reco_id - self._loader_address = loader_address - self._pvscan_address = id(pvscan) - self.is_debug = debug - self.set_info() - - def set_info(self): - self.info = self.get_info(self.reco_id) - - def get_info(self, reco_id:Optional[int] = None, get_analyzer:bool = False): - infoobj = ScanInfo() - pvscan = self.retrieve_pvscan() - analysed = ScanInfoAnalyzer(pvscan, reco_id, self.is_debug) - - if get_analyzer: - return analysed - for attr_name in dir(analysed): - if 'info_' in attr_name: - attr_vals = getattr(analysed, attr_name) - setattr(infoobj, attr_name.replace('info_', ''), attr_vals) - if attr_vals and attr_vals['warns']: - infoobj.warns.extend(attr_vals['warns']) - return infoobj - - def get_affine_info(self, reco_id:Optional[int] = None): - if reco_id: - info = self.get_info(reco_id) - else: - info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) - return AffineAnalyzer(info) - - def get_data_info(self, reco_id: Optional[int] = None): - reco_id = reco_id or self.avail[0] - recoobj = self.get_reco(reco_id) - fileobj = recoobj.get_2dseq() - info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) - return DataArrayAnalyzer(info, 
fileobj) - - def get_affine(self, reco_id:Optional[int] = None, - subj_type:Optional[str] = None, subj_position:Optional[str] = None): - return self.get_affine_info(reco_id).get_affine(subj_type, subj_position) - - def get_dataarray(self, reco_id: Optional[int] = None): - return self.get_data_info(reco_id).get_dataarray() - - def retrieve_pvscan(self): - if self._pvscan_address: - return ctypes.cast(self._pvscan_address, ctypes.py_object).value - - def retrieve_loader(self): - if self._loader_address: - return ctypes.cast(self._loader_address, ctypes.py_object).value - \ No newline at end of file diff --git a/brkraw/api/data/__init__.py b/brkraw/api/data/__init__.py new file mode 100644 index 0000000..2b9f2b4 --- /dev/null +++ b/brkraw/api/data/__init__.py @@ -0,0 +1,4 @@ +from .study import Study +from .scan import Scan, ScanInfo + +__all__ = ['Study', 'Scan', 'ScanInfo'] \ No newline at end of file diff --git a/brkraw/api/data/scan.py b/brkraw/api/data/scan.py new file mode 100644 index 0000000..811be91 --- /dev/null +++ b/brkraw/api/data/scan.py @@ -0,0 +1,92 @@ +from __future__ import annotations +from typing import Optional, Union +import ctypes +from ..pvobj import PvScan, PvReco, PvFiles +from ..pvobj.base import BaseBufferHandler +from ..analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer + + +class ScanInfo(BaseAnalyzer): + def __init__(self): + self.warns = [] + + @property + def num_warns(self): + return len(self.warns) + + +class Scan(BaseBufferHandler): + """The Scan class design to interface with analyzer, + + Args: + pvobj (_type_): _description_ + """ + def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Optional[int] = None, + study_address: Optional[int] = None, debug: bool=False): + self.reco_id = reco_id + self._study_address = study_address + self._pvobj_address = id(pvobj) + self.is_debug = debug + self.set_scaninfo() + + def retrieve_pvobj(self): + if self._pvobj_address: + return 
ctypes.cast(self._pvobj_address, ctypes.py_object).value + + def retrieve_study(self): + if self._study_address: + return ctypes.cast(self._study_address, ctypes.py_object).value + + def set_scaninfo(self, reco_id:Optional[int] = None): + reco_id = reco_id or self.reco_id + self.info = self.get_scaninfo(reco_id) + + def get_scaninfo(self, reco_id:Optional[int] = None, get_analyzer:bool = False): + infoobj = ScanInfo() + pvobj = self.retrieve_pvobj() + analysed = ScanInfoAnalyzer(pvobj, reco_id, self.is_debug) + + if get_analyzer: + return analysed + for attr_name in dir(analysed): + if 'info_' in attr_name: + attr_vals = getattr(analysed, attr_name) + if warns:= attr_vals.pop('warns', None): + infoobj.warns.extend(warns) + setattr(infoobj, attr_name.replace('info_', ''), attr_vals) + return infoobj + + def get_affine_analyzer(self, reco_id:Optional[int] = None): + if reco_id: + info = self.get_scaninfo(reco_id) + else: + info = self.info if hasattr(self, 'info') else self.get_scaninfo(self.reco_id) + return AffineAnalyzer(info) + + def get_datarray_analyzer(self, reco_id: Optional[int] = None): + reco_id = reco_id or self.reco_id + pvobj = self.retrieve_pvobj() + fileobj = pvobj.get_2dseq(reco_id=reco_id) + self._buffers.append(fileobj) + info = self.info if hasattr(self, 'info') else self.get_scaninfo(reco_id) + return DataArrayAnalyzer(info, fileobj) + + @property + def avail(self): + return self.pvobj.avail + + @property + def pvobj(self): + return self.retrieve_pvobj() + + @property + def about_scan(self): + return self.info.to_dict() + + @property + def about_affine(self): + return self.get_affine_analyzer().to_dict() + + @property + def about_dataarray(self): + return self.get_datarray_analyzer().to_dict() \ No newline at end of file diff --git a/brkraw/api/brkobj/study.py b/brkraw/api/data/study.py similarity index 65% rename from brkraw/api/brkobj/study.py rename to brkraw/api/data/study.py index d38ed19..aa120c7 100644 --- a/brkraw/api/brkobj/study.py +++ 
b/brkraw/api/data/study.py @@ -1,10 +1,10 @@ from __future__ import annotations -from typing import Dict from ..pvobj import PvDataset -from .scan import ScanObj +from .scan import Scan +from pathlib import Path -class StudyObj(PvDataset): - def __init__(self, path): +class Study(PvDataset): + def __init__(self, path: Path): super().__init__(path) self._parse_header() @@ -13,8 +13,8 @@ def get_scan(self, scan_id, reco_id=None, debug=False): Get a scan object by scan ID. """ pvscan = super().get_scan(scan_id) - return ScanObj(pvscan=pvscan, reco_id=reco_id, - loader_address=id(self), debug=debug) + return Scan(pvobj=pvscan, reco_id=reco_id, + study_address=id(self), debug=debug) def _parse_header(self): if not self.contents or 'subject' not in self.contents['files']: @@ -30,16 +30,17 @@ def _parse_header(self): def avail(self): return super().avail - @property + @property #TODO def info(self): """output all analyzed information""" info = {'header': None, 'scans': {}} if header := self.header: info['header'] = header - for scan_id in self.avail: - info['scans'][scan_id] = {} - scanobj = self.get_scan(scan_id) - for reco_id in scanobj.avail: - info['scans'][scan_id][reco_id] = scanobj.get_info(reco_id).vars() + # for scan_id in self.avail: + # scanobj = self.get_scan(scan_id) + # info['scans'][scan_id] = {'protocol_name': scanobj.info.protocol['protocol_name'], + # 'recos': {}} + # for reco_id in scanobj.avail: + # info['scans'][scan_id]['recos'][reco_id] = scanobj.get_info(reco_id).frame_group return info diff --git a/brkraw/api/helper/__init__.py b/brkraw/api/helper/__init__.py index c2aed94..8df058c 100644 --- a/brkraw/api/helper/__init__.py +++ b/brkraw/api/helper/__init__.py @@ -6,6 +6,8 @@ from .cycle import Cycle from .orientation import Orientation, to_matvec, from_matvec, rotate_affine from .fid import FID +from .diffusion import Diffusion -__all__ = [Protocol, FID, FrameGroup, DataArray, Image, SlicePack, Cycle, Orientation, - to_matvec, from_matvec, 
rotate_affine] \ No newline at end of file +__all__ = ['Protocol', 'FID', 'FrameGroup', 'DataArray', + 'Image', 'SlicePack', 'Cycle', 'Orientation', 'Diffusion', + 'to_matvec', 'from_matvec', 'rotate_affine'] \ No newline at end of file diff --git a/brkraw/api/helper/cycle.py b/brkraw/api/helper/cycle.py index 60a3dd9..05162ec 100644 --- a/brkraw/api/helper/cycle.py +++ b/brkraw/api/helper/cycle.py @@ -1,6 +1,5 @@ from __future__ import annotations import re -import numpy as np from typing import TYPE_CHECKING from .base import BaseHelper from .frame_group import FrameGroup diff --git a/brkraw/api/helper/diffusion.py b/brkraw/api/helper/diffusion.py new file mode 100644 index 0000000..585c3f8 --- /dev/null +++ b/brkraw/api/helper/diffusion.py @@ -0,0 +1,52 @@ +from __future__ import annotations +import numpy as np +from typing import TYPE_CHECKING +from .base import BaseHelper +if TYPE_CHECKING: + from ..analyzer import ScanInfoAnalyzer + + +class Diffusion(BaseHelper): + """requires method to parse parameter related to the Diffusion Imaging + + Dependencies: + acqp + visu_pars + + Args: + BaseHelper (_type_): _description_ + """ + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + method = analobj.method + + self.bvals = None + self.bvecs = None + if method: + self._set_params(method) + else: + self._warn("Failed to fetch 'bvals' and 'bvecs' information because the 'method' file is missing from 'analobj'.") + + def _set_params(self, method): + bvals = method.get('PVM_DwEffBval') + bvecs = method.get('PVM_DwGradVec') + if bvals is not None: + self.bvals = np.array([bvals]) if np.size(bvals) < 2 else np.array(bvals) + if bvecs is not None: + self.bvecs = self._L2_norm(bvecs.T) + + @staticmethod + def _L2_norm(bvecs): + # Normalize bvecs + bvecs_axis = 0 + bvecs_L2_norm = np.atleast_1d(np.linalg.norm(bvecs, 2, bvecs_axis)) + bvecs_L2_norm[bvecs_L2_norm < 1e-15] = 1 + bvecs = bvecs / np.expand_dims(bvecs_L2_norm, bvecs_axis) + return bvecs + + def 
get_info(self): + return { + 'bvals': self.bvals, + 'bvecs': self.bvecs, + 'warns': self.warns + } \ No newline at end of file diff --git a/brkraw/api/helper/fid.py b/brkraw/api/helper/fid.py index 9baf320..58461fd 100644 --- a/brkraw/api/helper/fid.py +++ b/brkraw/api/helper/fid.py @@ -1,13 +1,13 @@ from __future__ import annotations import numpy as np from typing import TYPE_CHECKING -from .base import BaseHelper, is_all_element_same, BYTEORDER, WORDTYPE +from .base import BaseHelper, BYTEORDER, WORDTYPE if TYPE_CHECKING: from ..analyzer import ScanInfoAnalyzer class FID(BaseHelper): - """requires visu_pars and aqcp to pars parameter related to the dtype of binary files + """requires visu_pars and aqcp to parse parameter related to the dtype of binary files Dependencies: acqp @@ -25,10 +25,9 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): byte_order = f'{acqp["BYTORDA"]}Endian' self.dtype = np.dtype(f'{BYTEORDER[byte_order]}{WORDTYPE[word_type]}') else: - self.fid_dtype = None + self.dtype = None self._warn("Failed to fetch 'fid_dtype' information because the 'acqp' file is missing from 'analobj'.") - def get_info(self): return { 'dtype': self.dtype, diff --git a/brkraw/api/helper/recipe.py b/brkraw/api/helper/recipe.py new file mode 100644 index 0000000..ae91f5e --- /dev/null +++ b/brkraw/api/helper/recipe.py @@ -0,0 +1,97 @@ +from __future__ import annotations +import re +import warnings +from collections import OrderedDict +from typing import TYPE_CHECKING +from .base import BaseHelper +if TYPE_CHECKING: + from typing import Optional, Dict, List, Any + from brkraw.api.analyzer import BaseAnalyzer + +class Recipe(BaseHelper): + def __init__(self, target: 'BaseAnalyzer', recipe: dict, legacy: bool = False, + startup_scripts:Optional[List[str]] = None): + self.target = target + self.recipe = recipe + self.results = OrderedDict() + self.backward_comp = legacy + self.startup_scripts = startup_scripts + self._parse_recipe() + + def _parse_recipe(self): + for 
key, value in self.recipe.items(): + self.results[key] = self._eval_value(value) + + def _eval_value(self, value: Any): + if isinstance(value, str): + value = self._process_str(value) + elif isinstance(value, list): + value = self._process_list(value) + elif isinstance(value, dict): + value = self._process_dict(value) + return value + + def _legacy_parser(self, param_key: str): + for pars in ['acqp', 'method', 'visu_pars']: + value = getattr(self.target, pars).get(param_key) + if value is not None: + return value + return param_key + + def _process_str(self, str_obj: str): + if self.backward_comp: + return self._legacy_parser(str_obj) + ptrn = r'(?P^[a-zA-Z][a-zA-Z0-9_]*)\.(?P[a-zA-Z][a-zA-Z0-9_]*)' + if matched := re.match(ptrn, str_obj): + attr = getattr(self.target, matched['attr']) + return attr.get(matched['key'], None) + else: + return str_obj + + def _process_list(self, list_obj: List): + for c in list_obj: + processed = self._eval_value(c) + if processed is not None: + return processed + return None + + def _process_dict(self, dict_obj: Dict): + script_cmd = 'Equation' if self.backward_comp else 'script' + if script_cmd in dict_obj.keys(): + return self._process_dict_case_script(dict_obj, script_cmd) + elif 'key' in dict_obj.keys(): + return self._process_dict_case_pick_from_list(dict_obj) + else: + processed = {} + for key, value in dict_obj.items(): + processed[key] = self._eval_value(value) + return processed + + def _process_dict_case_script(self, dict_obj: Dict, script_cmd: List[str]): + script = dict_obj.pop(script_cmd) + for s in self.startup_scripts: + exec(s) + for key, value in dict_obj.items(): + value = self._eval_value(value) + if value == None: + return None + exec(f'global {key}') + exec(f'{key} = {value}') + exec(f"output = {script}", globals(), locals()) + return locals()['output'] + + def _process_dict_case_pick_from_list(self, dict_obj: Dict): + key = dict_obj.pop('key') + value = self._process_str(key) + if not isinstance(value, list): + 
warnings.warn(f"The value returned from '{key}' is not of type 'list'.", UserWarning) + return None + if 'where' in dict_obj.keys(): + hint = self._eval_value(dict_obj.pop('where')) + return value.index(hint) if hint in value else None + elif 'idx' in dict_obj.keys(): + idx = self._eval_value(dict_obj.pop('idx')) + return value[idx] if idx < len(value) else None + + def get(self): + return self.results \ No newline at end of file diff --git a/brkraw/api/helper/slicepack.py b/brkraw/api/helper/slicepack.py index 36b2b74..d659578 100644 --- a/brkraw/api/helper/slicepack.py +++ b/brkraw/api/helper/slicepack.py @@ -12,6 +12,7 @@ class SlicePack(BaseHelper): Dependencies: FrameGroup Image + method visu_pars Args: @@ -19,6 +20,7 @@ class SlicePack(BaseHelper): """ def __init__(self, analobj: 'ScanInfoAnalyzer'): super().__init__() + method = analobj.method visu_pars = analobj.visu_pars fg_info = analobj.get("info_frame_group") or FrameGroup(analobj).get_info() @@ -45,6 +47,7 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): self.num_slice_packs = num_slice_packs self.num_slices_each_pack = num_slices_each_pack self.slice_distances_each_pack = slice_distances_each_pack + self.slice_order_scheme = method.get("PVM_ObjOrderScheme") disk_slice_order = visu_pars.get("VisuCoreDiskSliceOrder") or 'normal' self.is_reverse = 'reverse' in disk_slice_order @@ -71,16 +74,6 @@ def _parse_legacy(self, visu_pars, fg_info): num_slices_each_pack = [int(shape[slice_fid]/num_slice_packs) for _ in range(num_slice_packs)] else: num_slices_each_pack = [shape[slice_fid]] - - slice_fg = [fg for fg in fg_info['id'] if 'slice' in fg.lower()] - if len(slice_fg): - if num_slice_packs > 1: - num_slices_each_pack.extend( - int(shape[0] / num_slice_packs) - for _ in range(num_slice_packs) - ) - else: - num_slices_each_pack.append(shape[0]) slice_distances_each_pack = [visu_pars["VisuCoreFrameThickness"] for _ in range(num_slice_packs)] return num_slice_packs, num_slices_each_pack, 
slice_distances_each_pack @@ -119,6 +112,7 @@ def get_info(self): 'num_slices_each_pack': self.num_slices_each_pack, 'slice_distances_each_pack': self.slice_distances_each_pack, 'slice_distance_unit': 'mm', + 'slice_order_scheme': self.slice_order_scheme, 'reverse_slice_order': self.is_reverse, 'warns': self.warns } \ No newline at end of file diff --git a/brkraw/api/plugin/__init__.py b/brkraw/api/plugin/__init__.py new file mode 100644 index 0000000..a43766b --- /dev/null +++ b/brkraw/api/plugin/__init__.py @@ -0,0 +1,5 @@ +from .aggregator import Aggregator +from .plugged import Plugged +from .preset import Preset + +__all__ = ['Aggregator', 'Plugged', 'Preset'] \ No newline at end of file diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py index ed59d4f..76c26cf 100755 --- a/brkraw/api/pvobj/__init__.py +++ b/brkraw/api/pvobj/__init__.py @@ -2,6 +2,6 @@ from .pvscan import PvScan from .pvreco import PvReco from .pvfiles import PvFiles -from .parameters import Parameter +from .parameters import Parameter, Parser -__all__ = [PvDataset, PvScan, PvReco, PvFiles, Parameter] \ No newline at end of file +__all__ = ['PvDataset', 'PvScan', 'PvReco', 'PvFiles', 'Parameter', 'Parser'] \ No newline at end of file diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py index 6ed3f3b..6576be1 100644 --- a/brkraw/api/pvobj/base.py +++ b/brkraw/api/pvobj/base.py @@ -3,8 +3,32 @@ import zipfile from collections import OrderedDict from collections import defaultdict +from typing import TYPE_CHECKING +from pathlib import Path from .parameters import Parameter -from typing import Optional + +if TYPE_CHECKING: + from typing import Optional, Union, List + from io import BufferedReader + from zipfile import ZipExtFile + + +class BaseBufferHandler: + _buffers: Union[List[BufferedReader], List[ZipExtFile]] = [] + def close(self): + if self._buffers: + for b in self._buffers: + if not b.closed: + b.close() + + def __enter__(self): + return self + + def 
__exit__(self, exc_type, exc_val, exc_tb): + self.close() + # Return False to propagate exceptions, if any + return False + class BaseMethods: """ @@ -26,8 +50,11 @@ class BaseMethods: _rootpath = None _contents = None + def isinstance(self, name): + return self.__class__.__name__ == name + @staticmethod - def _fetch_dir(path): + def _fetch_dir(path: 'Path'): """Searches for directories and files in a given directory and returns the directory structure. Args: @@ -41,15 +68,17 @@ def _fetch_dir(path): - 'file_indexes': An empty list. """ contents = OrderedDict() - abspath = os.path.abspath(path) + abspath = path.absolute() for dirpath, dirnames, filenames in os.walk(abspath): normalized_dirpath = os.path.normpath(dirpath) relative_path = os.path.relpath(normalized_dirpath, abspath) - contents[relative_path] = {'dirs': dirnames, 'files': filenames, 'file_indexes': []} + file_sizes = [os.path.getsize(os.path.join(dirpath, f)) for f in filenames] + contents[relative_path] = {'dirs': dirnames, 'files': filenames, + 'file_indexes': [], 'file_sizes': file_sizes} return contents @staticmethod - def _fetch_zip(path): + def _fetch_zip(path: 'Path'): """Searches for files in a zip file and returns the directory structure and file information. Args: @@ -63,12 +92,13 @@ def _fetch_zip(path): - 'file_indexes': A list of file indexes. 
""" with zipfile.ZipFile(path) as zip_file: - contents = defaultdict(lambda: {'dirs': set(), 'files': [], 'file_indexes': []}) + contents = defaultdict(lambda: {'dirs': set(), 'files': [], 'file_indexes': [], 'file_sizes': []}) for i, item in enumerate(zip_file.infolist()): if not item.is_dir(): dirpath, filename = os.path.split(item.filename) contents[dirpath]['files'].append(filename) contents[dirpath]['file_indexes'].append(i) + contents[dirpath]['file_sizes'].append(item.file_size) while dirpath: dirpath, dirname = os.path.split(dirpath) if dirname: @@ -156,9 +186,11 @@ def __getattr__(self, key): fileobj = self._open_as_fileobject(file.pop()) if self._is_binary(fileobj): return fileobj - par = Parameter(fileobj.read().decode('UTF-8').split('\n'), + string_list = fileobj.read().decode('UTF-8').split('\n') + fileobj.close() + par = Parameter(string_list, name=key, scan_id=self._scan_id, reco_id=self._reco_id) - return par if par.is_parameter() else fileobj.read().decode('UTF-8').split('\n') + return par if par.is_parameter() else string_list raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{key}'") @property @@ -171,7 +203,7 @@ def get_fid(self, scan_id:Optional[int] = None): except KeyError: raise TypeError("Missing required argument: 'scan_id must be provided for {self.__class__.__name__}.") fid_files = ['fid', 'rawdata.job0'] - for fid in ['fid', 'rawdata.job0']: + for fid in fid_files: if fid in pvobj.contents['files']: return getattr(pvobj, fid) raise FileNotFoundError(f"The required file '{' or '.join(fid_files)}' does not exist. 
" @@ -181,7 +213,8 @@ def get_2dseq(self, scan_id:Optional[int] = None, reco_id:Optional[int] = None): try: if scan_id and hasattr(self, 'get_scan'): pvobj = self.get_scan(scan_id).get_reco(reco_id) - elif reco_id and hasattr(self, 'get_reco'): + elif hasattr(self, 'get_reco'): + reco_id = reco_id or sorted(self.avail).pop(0) pvobj = self.get_reco(reco_id) else: pvobj = self @@ -199,7 +232,6 @@ def get_2dseq(self, scan_id:Optional[int] = None, reco_id:Optional[int] = None): raise FileNotFoundError("The required file '2dseq' does not exist. " "Please check the dataset and ensure the file is in the expected location.") - @staticmethod def _is_binary(fileobj, bytes=512): block = fileobj.read(bytes) diff --git a/brkraw/api/pvobj/parameters.py b/brkraw/api/pvobj/parameters.py index 8a8386c..d50eb45 100644 --- a/brkraw/api/pvobj/parameters.py +++ b/brkraw/api/pvobj/parameters.py @@ -121,6 +121,7 @@ def _set_param(self, params, param_addr, contents): This method is intended to be called internally within the class and does not have direct usage examples. """ addr_diff = np.diff(param_addr) + self._params_key_struct = params self._contents = contents self._header = OrderedDict() self._parameters = OrderedDict() diff --git a/brkraw/api/pvobj/pvdataset.py b/brkraw/api/pvobj/pvdataset.py index c3fd46e..13b3c9e 100755 --- a/brkraw/api/pvobj/pvdataset.py +++ b/brkraw/api/pvobj/pvdataset.py @@ -1,7 +1,7 @@ -import os import re import zipfile from collections import OrderedDict +from pathlib import Path from .base import BaseMethods from .pvscan import PvScan @@ -23,7 +23,7 @@ class PvDataset(BaseMethods): avail (list): A list of available scans. contents (dict): A dictionary of pvdataset contents. """ - def __init__(self, path, debug=False): + def __init__(self, path: Path, debug: bool=False): """ Initialize the object with the given path and optional debug flag. 
@@ -47,7 +47,7 @@ def __init__(self, path, debug=False): self._construct() # internal method - def _check_dataset_validity(self, path): + def _check_dataset_validity(self, path: Path): """ Checks the validity of a given dataset path. @@ -64,19 +64,20 @@ def _check_dataset_validity(self, path): Returns: None """ - self._path = os.path.abspath(path) - if not os.path.exists(self._path): + path = Path(path) if isinstance(path, str) else path + self._path: Path = path.absolute() + if not self._path.exists(): raise FileNotFoundError(f"The path '{self._path}' does not exist.") - if os.path.isdir(self._path): + if self._path.is_dir(): self._contents = self._fetch_dir(self._path) self.is_compressed = False - elif os.path.isfile(self._path) and zipfile.is_zipfile(self._path): + elif self._path.is_file() and zipfile.is_zipfile(self._path): self._contents = self._fetch_zip(self._path) self.is_compressed = True else: raise ValueError(f"The path '{self._path}' does not meet the required criteria.") - def _construct(self): # sourcery skip: low-code-quality + def _construct(self): """ Constructs the object by organizing the contents. 
diff --git a/brkraw/api/pvobj/pvfiles.py b/brkraw/api/pvobj/pvfiles.py index 23503ca..931cb5b 100644 --- a/brkraw/api/pvobj/pvfiles.py +++ b/brkraw/api/pvobj/pvfiles.py @@ -1,8 +1,9 @@ import os from .base import BaseMethods +from pathlib import Path class PvFiles(BaseMethods): - def __init__(self, *files): + def __init__(self, *files: Path): """_summary_ Args: @@ -11,7 +12,7 @@ def __init__(self, *files): """ self.update(*files) - def update(self, *files): + def update(self, *files: Path): self._path = [os.path.abspath(f) for f in files if os.path.exists(f)] self._contents = {"files": [os.path.basename(f) for f in self._path], "dirs": [], diff --git a/brkraw/api/pvobj/pvreco.py b/brkraw/api/pvobj/pvreco.py index 4a7b535..9440801 100644 --- a/brkraw/api/pvobj/pvreco.py +++ b/brkraw/api/pvobj/pvreco.py @@ -1,4 +1,5 @@ import os +import warnings from .base import BaseMethods @@ -55,3 +56,7 @@ def path(self): if self.is_compressed: return path return os.path.join(*path) + + def get_fid(self): + warnings.warn(f'{self.__class__} does not support get_fid method. use Scan- or Study-level object instead') + return None \ No newline at end of file diff --git a/brkraw/api/pvobj/pvscan.py b/brkraw/api/pvobj/pvscan.py index 746bdde..72e0aa3 100644 --- a/brkraw/api/pvobj/pvscan.py +++ b/brkraw/api/pvobj/pvscan.py @@ -1,9 +1,11 @@ from __future__ import annotations import os from collections import OrderedDict -from typing import Optional +from typing import Optional, Tuple, Dict, TYPE_CHECKING from .base import BaseMethods from .pvreco import PvReco +if TYPE_CHECKING: + from pathlib import Path class PvScan(BaseMethods): """ @@ -24,14 +26,18 @@ class PvScan(BaseMethods): avail (list): A list of available items. contents (dict): A dictionary of pvscan contents. 
""" - def __init__(self, scan_id: Optional[int], pathes, contents=None, recos=None): + def __init__(self, + scan_id: Optional[int], + pathes: Tuple[Path, Path], + contents: Optional[Dict]=None, + recos: Optional[OrderedDict]=None): """ Initialize a Dataset object. Args: scan_id (int): The ID of the scan. pathes (tuple): A tuple containing the root path and the path. - contents (list, optional): The initial contents of the dataset. Defaults to None. + contents (dict, optional): The initial contents of the dataset. Defaults to None. recos (dict, optional): A dictionary of reco objects. Defaults to None. Attributes: @@ -48,12 +54,12 @@ def __init__(self, scan_id: Optional[int], pathes, contents=None, recos=None): self.update(contents) self._recos = OrderedDict(recos) if recos else OrderedDict() - def update(self, contents): + def update(self, contents: Dict): """ Update the contents of the dataset. Args: - contents (list): The new contents of the dataset. + contents (dict): The new contents of the dataset. Returns: None @@ -62,7 +68,7 @@ def update(self, contents): self.is_compressed = True if contents.get('file_indexes') else False self._contents = contents - def set_reco(self, path, reco_id, contents): + def set_reco(self, path: Path, reco_id: int, contents: Dict): """ Set a reco object with the specified path, ID, and contents. @@ -76,7 +82,7 @@ def set_reco(self, path, reco_id, contents): """ self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents) - def get_reco(self, reco_id): + def get_reco(self, reco_id: int): """ Get a specific reco object by ID. 
@@ -91,7 +97,7 @@ def get_reco(self, reco_id): """ return self._recos[reco_id] - def get_visu_pars(self, reco_id=None): + def get_visu_pars(self, reco_id: Optional[int] = None): if reco_id: return getattr(self.get_reco(reco_id), 'visu_pars') elif 'visu_pars' in self.contents['files']: diff --git a/brkraw/app/tonifti/__init__.py b/brkraw/app/tonifti/__init__.py index c678c14..31b1e38 100644 --- a/brkraw/app/tonifti/__init__.py +++ b/brkraw/app/tonifti/__init__.py @@ -2,16 +2,12 @@ dependency: bids, plugin """ -import argparse from brkraw import __version__ -from .loader import Loader - -__all__ = [Loader] +from .base import BasePlugin, PvScan, PvReco, PvFiles +from .study import StudyToNifti, ScanToNifti +import argparse -def load(*args, **kwargs): - """Load data in Facade design pattern - """ - Loader() +__all__ = ['BasePlugin', 'StudyToNifti', 'ScanToNifti', 'PvScan', 'PvReco', 'PvFiles'] def main(): """main script allows convert brkraw diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 03777f1..064715c 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -1,15 +1,17 @@ from __future__ import annotations -from enum import Enum import warnings import numpy as np -from io import BufferedReader -from zipfile import ZipExtFile -from brkraw.api.brkobj import ScanObj, ScanInfo -from brkraw.api.analyzer import ScanInfoAnalyzer, DataArrayAnalyzer, AffineAnalyzer +import nibabel as nib +from enum import Enum +from pathlib import Path from .header import Header -from typing import TYPE_CHECKING, Optional, Union +from brkraw.api.pvobj.base import BaseBufferHandler +from brkraw.api.pvobj import PvScan, PvReco, PvFiles +from brkraw.api.data import Scan +from typing import TYPE_CHECKING if TYPE_CHECKING: - from pathlib import Path + from typing import Optional, Union + from brkraw.api.plugin import Plugged XYZT_UNITS = \ @@ -22,34 +24,18 @@ class ScaleMode(Enum): HEADER = 2 -class BaseMethods: - info, fileobj = (None, None) - 
+class BaseMethods(BaseBufferHandler): def set_scale_mode(self, scale_mode:Optional[ScaleMode]=None): if scale_mode: self.scale_mode = scale_mode else: self.scale_mode = ScaleMode.HEADER - - def _set_info(self): - analysed = ScanInfoAnalyzer(self) - infoobj = ScanInfo() - - for attr_name in dir(analysed): - if 'info_' in attr_name: - attr_vals = getattr(analysed, attr_name) - setattr(infoobj, attr_name.replace('info_', ''), attr_vals) - if attr_vals and attr_vals['warns']: - infoobj.warns.extend(attr_vals['warns']) - self.info = infoobj - self.analysed = analysed @staticmethod - def get_dataobj(scanobj:Union['ScanInfo','ScanObj'], - fileobj:Union['BufferedReader', 'ZipExtFile', None] = None, + def get_dataobj(scanobj:'Scan', reco_id:Optional[int] = None, scale_correction:bool = False): - data_dict = BaseMethods.get_data_dict(scanobj, fileobj, reco_id) + data_dict = BaseMethods.get_data_dict(scanobj, reco_id) dataobj = data_dict['data_array'] if scale_correction: try: @@ -62,87 +48,78 @@ def get_dataobj(scanobj:Union['ScanInfo','ScanObj'], return dataobj @staticmethod - def get_affine(scanobj:Union['ScanInfo', 'ScanObj'], reco_id:Optional[int] = None, + def get_affine(scanobj:'Scan', reco_id:Optional[int] = None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): return BaseMethods.get_affine_dict(scanobj, reco_id, subj_type, subj_position)['affine'] @staticmethod - def get_data_dict(scanobj:Union['ScanInfo', 'ScanObj'], - fileobj:Union['BufferedReader', 'ZipExtFile'] = None, + def get_data_dict(scanobj:'Scan', reco_id:Optional[int] = None): - if isinstance(scanobj, ScanObj): - data_info = scanobj.get_data_info(reco_id) - elif isinstance(scanobj, ScanInfo) and isinstance(scanobj, Union[BufferedReader, ZipExtFile]): - data_info = DataArrayAnalyzer(scanobj, fileobj) - else: - raise TypeError( - "Unsupported type for 'scanobj'. Expected 'scanobj' to be an instance of 'ScanObj' or " - "'ScanInfo' combined with either 'BufferedReader' or 'ZipExtFile'. 
Please check the type of 'scanobj' " - "and ensure it matches the expected conditions." - ) - axis_labels = data_info.shape_desc - dataarray = data_info.get_dataarray() + datarray_analyzer = scanobj.get_datarray_analyzer(reco_id) + axis_labels = datarray_analyzer.shape_desc + dataarray = datarray_analyzer.get_dataarray() slice_axis = axis_labels.index('slice') if 'slice' in axis_labels else 2 if slice_axis != 2: dataarray = np.swapaxes(dataarray, slice_axis, 2) axis_labels[slice_axis], axis_labels[2] = axis_labels[2], axis_labels[slice_axis] return { 'data_array': dataarray, - 'data_slope': data_info.slope, - 'data_offset': data_info.offset, + 'data_slope': datarray_analyzer.slope, + 'data_offset': datarray_analyzer.offset, 'axis_labels': axis_labels } @staticmethod - def get_affine_dict(scanobj:Union['ScanInfo','ScanObj'], reco_id:Optional[int] = None, + def get_affine_dict(scanobj:'Scan', reco_id:Optional[int] = None, subj_type:Optional[str] = None, subj_position:Optional[str] = None): - if isinstance(scanobj, ScanObj): - affine_info = scanobj.get_affine_info(reco_id) - elif isinstance(scanobj, ScanInfo): - affine_info = AffineAnalyzer(scanobj) - else: - raise TypeError( - "Unsupported type for 'scanobj'. Expected 'scanobj' to be an instance of 'ScanObj' or 'ScanInfo'. " - "Please check the type of 'scanobj' and ensure it matches the expected conditions." 
- ) - subj_type = subj_type or affine_info.subj_type - subj_position = subj_position or affine_info.subj_position - affine = affine_info.get_affine(subj_type, subj_position) + affine_analyzer = scanobj.get_affine_analyzer(reco_id) + subj_type = subj_type or affine_analyzer.subj_type + subj_position = subj_position or affine_analyzer.subj_position + affine = affine_analyzer.get_affine(subj_type, subj_position) return { "num_slicepacks": len(affine) if isinstance(affine, list) else 1, "affine": affine, "subj_type": subj_type, "subj_position": subj_position } - - @staticmethod - def get_bdata(analobj:'ScanInfoAnalyzer'): - """Extract, format, and return diffusion bval and bvec""" - bvals = np.array(analobj.method.get('PVM_DwEffBval')) - bvecs = np.array(analobj.method.get('PVM_DwGradVec').T) - # Correct for single b-vals - if np.size(bvals) < 2: - bvals = np.array([bvals]) - # Normalize bvecs - bvecs_axis = 0 - bvecs_L2_norm = np.atleast_1d(np.linalg.norm(bvecs, 2, bvecs_axis)) - bvecs_L2_norm[bvecs_L2_norm < 1e-15] = 1 - bvecs = bvecs / np.expand_dims(bvecs_L2_norm, bvecs_axis) - return bvals, bvecs - - @staticmethod - def get_bids_metadata(scaninfo:'ScanInfo', bids_recipe:Optional['Path']=None): - print(isinstance(scaninfo, ScanInfo), bids_recipe) @staticmethod - def get_nifti1header(scaninfo:'ScanInfo', scale_mode:Optional['ScaleMode']=None): + def get_nifti1header(scanobj:'Scan', reco_id:Optional[int] = None, + scale_mode:Optional['ScaleMode']=None): + if reco_id: + scanobj.set_scaninfo(reco_id) scale_mode = scale_mode or ScaleMode.HEADER - return Header(scaninfo, scale_mode).get() - - # @staticmethod - # def get_nifti1image(self, scan_id:int, reco_id:int|None=None, - # subj_type:str|None=None, subj_position:str|None=None, - # scale_mode:ScaleMode = ScaleMode.HEADER): - # smode = scale_mode if scale_mode == ScaleMode.APPLY else ScaleMode.NONE - # data_dict = self.get_dataobj(scan_id, reco_id, smode) - # affine_dict = self.get_affine(scan_id, reco_id, subj_type, 
subj_position) \ No newline at end of file + return Header(scanobj.info, scale_mode).get() + + @staticmethod + def get_nifti1image(scanobj:'Scan', reco_id:Optional[int] = None, + scale_mode:Optional['ScaleMode']=None, + subj_type:Optional[str] = None, subj_position:Optional[str] = None, + plugin:Optional['Plugged']=None, plugin_kws:dict=None): + if plugin and plugin.type == 'tonifti': + with plugin(scanobj, **plugin_kws) as p: + dataobj = p.get_dataobj(bool(scale_mode)) + affine = p.get_affine(subj_type=subj_type, subj_position=subj_position) + header = p.get_nifti1header() + else: + scale_mode = scale_mode or ScaleMode.HEADER + dataobj = BaseMethods.get_dataobj(scanobj, reco_id, bool(scale_mode)) + affine = BaseMethods.get_affine(scanobj, reco_id, subj_type, subj_position) + header = BaseMethods.get_nifti1header(scanobj, reco_id, scale_mode) + return nib.Nifti1Image(dataobj, affine, header) + + +class BasePlugin(Scan, BaseMethods): + def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], verbose: bool=False, **kwargs): + super().__init__(pvobj, **kwargs) + self.verbose = verbose + + def close(self): + super().close() + self.clear_cache() + + def clear_cache(self): + for buffer in self._buffers: + file_path = Path(buffer.name) + if file_path.exists(): + file_path.unlink() diff --git a/brkraw/app/tonifti/brkraw.py b/brkraw/app/tonifti/brkraw.py deleted file mode 100644 index 7b359be..0000000 --- a/brkraw/app/tonifti/brkraw.py +++ /dev/null @@ -1,150 +0,0 @@ -from __future__ import annotations -from brkraw.api.brkobj import StudyObj -from .base import BaseMethods, ScaleMode -from typing import TYPE_CHECKING, Optional -if TYPE_CHECKING: - from pathlib import Path - - -class BrkrawToNifti(StudyObj, BaseMethods): - def __init__(self, path:'Path', scale_mode: Optional['ScaleMode'] = None): - """_summary_ - - Args: - path (Path): _description_ - scale_mode (ScaleMode , None, optional): _description_. Defaults to None. 
- """ - - super().__init__(path) - self.set_scale_mode(scale_mode) - self._cache = {} - - def get_scan(self, scan_id:int): - """_summary_ - - Args: - scan_id (int): _description_ - - Returns: - _type_: _description_ - """ - if scan_id not in self._cache.keys(): - self._cache[scan_id] = super().get_scan(scan_id) - return self._cache[scan_id] - - def get_scan_analyzer(self, scan_id:int, reco_id:Optional[int]=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - - Returns: - _type_: _description_ - """ - return self.get_scan(scan_id).get_info(reco_id, get_analyzer=True) - - def get_affine(self, scan_id:int, reco_id:Optional[int]=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - subj_type (str , None, optional): _description_. Defaults to None. - subj_position (str , None, optional): _description_. Defaults to None. - - Returns: - _type_: _description_ - """ - scanobj = self.get_scan(scan_id) - return super().get_affine(scanobj=scanobj, reco_id=reco_id, subj_type=subj_type, subj_position=subj_position) - - def get_dataobj(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - scale_mode (ScaleMode; , None, optional): _description_. Defaults to None. 
- - Raises: - ValueError: _description_ - - Returns: - _type_: _description_ - """ - scale_mode = scale_mode or self.scale_mode - scale_correction = False if scale_mode == ScaleMode.HEADER else True - scanobj = self.get_scan(scan_id) - return super().get_dataobj(scanobj=scanobj, fileobj=None, reco_id=reco_id, scale_correction=scale_correction) - - def get_data_dict(self, scan_id:int, reco_id:Optional[int]=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - - Returns: - _type_: _description_ - """ - scanobj = self.get_scan(scan_id) - return super().get_data_dict(scanobj=scanobj, reco_id=reco_id) - - def get_affine_dict(self, scan_id:int, reco_id:Optional[int]=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - subj_type (str , None, optional): _description_. Defaults to None. - subj_position (str , None, optional): _description_. Defaults to None. - - Returns: - _type_: _description_ - """ - scanobj = self.get_scan(scan_id) - return super().get_affine_dict(scanobj=scanobj, reco_id=reco_id, - subj_type=subj_type, subj_position=subj_position) - - def get_nifti1header(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - scale_mode (ScaleMode , None, optional): _description_. Defaults to None. 
- - Returns: - _type_: _description_ - """ - scale_mode = scale_mode or self.scale_mode - scaninfo = self.get_scan(scan_id).get_info(reco_id) - return super().get_nifti1header(scaninfo, scale_mode).get() - - def get_bdata(self, scan_id:int): - """_summary_ - - Args: - scan_id (int): _description_ - - Returns: - _type_: _description_ - """ - analobj = self.get_scan_analyzer(scan_id) - return super().get_bdata(analobj) - - def get_bids_metadata(self, scan_id:int, reco_id:Optional[int]=None, bids_recipe=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - bids_recipe (_type_, optional): _description_. Defaults to None. - - Returns: - _type_: _description_ - """ - analobj = self.get_scan_analyzer(scan_id, reco_id) - return super().get_bids_metadata(analobj, bids_recipe) - \ No newline at end of file diff --git a/brkraw/app/tonifti/converter.py b/brkraw/app/tonifti/converter.py deleted file mode 100644 index 22cc79d..0000000 --- a/brkraw/app/tonifti/converter.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import annotations - -class Converter: - """ - Data converter to NifTi format, - provide variouse converting mode - the Default is use default - in case of plugin needed, search available plugin (by name of plugin) and run it - the plugin functionality will be implemented using modules in plugin app - - sordino2nii will be first example case of plugin - """ - def __init__(self): - pass - - def save_to(self, output_path): - pass - \ No newline at end of file diff --git a/brkraw/app/tonifti/header.py b/brkraw/app/tonifti/header.py index 3611cf2..15ee638 100644 --- a/brkraw/app/tonifti/header.py +++ b/brkraw/app/tonifti/header.py @@ -3,14 +3,14 @@ from nibabel.nifti1 import Nifti1Header from typing import TYPE_CHECKING, Union if TYPE_CHECKING: - from brkraw.api.brkobj import ScanInfo + from brkraw.api.data import ScanInfo from .base import ScaleMode class Header: def __init__(self, 
scaninfo:'ScanInfo', scale_mode:Union['ScaleMode', int]): self.info = scaninfo - self.scale_mode = int(scale_mode) + self.scale_mode = int(scale_mode.value) self.nifti1header = Nifti1Header() self.nifti1header.default_x_flip = False self._set_scale_params() @@ -18,8 +18,7 @@ def __init__(self, scaninfo:'ScanInfo', scale_mode:Union['ScaleMode', int]): self._set_time_step() def _set_sliceorder(self): - self.info.slicepack - slice_order_scheme = self.info.method.get("PVM_ObjOrderScheme") + slice_order_scheme = self.info.slicepack['slice_order_scheme'] if slice_order_scheme == 'User_defined_slice_scheme' or slice_order_scheme: slice_code = 0 elif slice_order_scheme == 'Sequential': @@ -43,7 +42,7 @@ def _set_sliceorder(self): self.nifti1header['slice_code'] = slice_code def _set_time_step(self): - if self.info.cycle['num_cycle'] > 1: + if self.info.cycle['num_cycles'] > 1: time_step = self.info.cycle['time_step'] self.nifti1header['pixdim'][4] = time_step num_slices = self.info.slicepack['num_slices_each_pack'][0] @@ -51,8 +50,8 @@ def _set_time_step(self): def _set_scale_params(self): if self.scale_mode == 2: - self.nifti1header['scl_slope'] = self.info.dataarray['2dseq_slope'] - self.nifti1header['scl_inter'] = self.info.dataarray['2dseq_offset'] + self.nifti1header['scl_slope'] = self.info.dataarray['slope'] + self.nifti1header['scl_inter'] = self.info.dataarray['offset'] def get(self): return self.nifti1header \ No newline at end of file diff --git a/brkraw/app/tonifti/loader.py b/brkraw/app/tonifti/loader.py deleted file mode 100644 index 539a070..0000000 --- a/brkraw/app/tonifti/loader.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations -from .brkraw import BrkrawToNifti -from .pvscan import PvScanToNifti -from .pvreco import PvRecoToNifti -from .pvfiles import PvFilesToNifti - -class Loader: - def __init__(self, *args, **kwargs): - pass \ No newline at end of file diff --git a/brkraw/app/tonifti/pvfiles.py b/brkraw/app/tonifti/pvfiles.py deleted 
file mode 100644 index baf617e..0000000 --- a/brkraw/app/tonifti/pvfiles.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import annotations -from pathlib import Path -from brkraw.api.pvobj import PvFiles -from .base import BaseMethods, ScaleMode - -class PvFilesToNifti(PvFiles, BaseMethods): - def __init__(self, *files): - """_summary_ - - Args: - data_path (str): path of '2dseq' file in reco_dir - pars_path (str): path of 'visu_pars' file in reco_dir - """ - super.__init__(*files) - self._set_info() - diff --git a/brkraw/app/tonifti/pvreco.py b/brkraw/app/tonifti/pvreco.py deleted file mode 100644 index bbab6f9..0000000 --- a/brkraw/app/tonifti/pvreco.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations -import os -from pathlib import Path -from brkraw.api.pvobj import PvReco -from .base import BaseMethods - - -class PvRecoToNifti(PvReco, BaseMethods): - def __init__(self, path: 'Path'): - """_summary_ - - Args: - data_path (str): path of '2dseq' file in reco_dir - pars_path (str): path of 'visu_pars' file in reco_dir - """ - rootpath, reco_path = os.path.split(path) - _, dirs, files = os.walk(path) - super.__init__(None, reco_path, (rootpath, reco_path), {'dirs':dirs, 'files':files}) - self._set_info() diff --git a/brkraw/app/tonifti/pvscan.py b/brkraw/app/tonifti/pvscan.py deleted file mode 100644 index ffd939e..0000000 --- a/brkraw/app/tonifti/pvscan.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations -import os -from pathlib import Path -from brkraw.api.pvobj import PvScan -from .base import BaseMethods, ScaleMode - - -class PvScanToNifti(PvScan, BaseMethods): - def __init__(self, path:'Path'): - """_summary_ - - Args: - data_path (str): path of '2dseq' file in reco_dir - pars_path (str): path of 'visu_pars' file in reco_dir - """ - rootpath, scan_path = os.path.split(path) - _, dirs, files = os.walk(path) - super.__init__(None, scan_path, (rootpath, scan_path), {'dirs':dirs, 'files':files}) - self._set_info() diff --git 
a/brkraw/app/tonifti/scan.py b/brkraw/app/tonifti/scan.py new file mode 100644 index 0000000..989e6d4 --- /dev/null +++ b/brkraw/app/tonifti/scan.py @@ -0,0 +1,110 @@ +from __future__ import annotations +from pathlib import Path +from brkraw.api.data import Scan +from brkraw.api.pvobj import PvScan, PvReco, PvFiles +from collections import OrderedDict +from typing import TYPE_CHECKING +from .base import BaseMethods, ScaleMode +if TYPE_CHECKING: + from typing import Union, Optional + from brkraw.api.plugin import Plugged + + +class ScanToNifti(Scan, BaseMethods): + def __init__(self, *paths: Path, scale_mode: Optional[ScaleMode]=None, **kwargs): + """_summary_ + + Args: + data_path (str): path of '2dseq' file in reco_dir + pars_path (str): path of 'visu_pars' file in reco_dir + """ + self.scale_mode = scale_mode + if len(paths) == 0: + super().__init__(**kwargs) + else: + + if len(paths) == 1 and paths[0].is_dir(): + abspath = paths[0].absolute() + if contents := self._is_pvscan(abspath): + pvobj = self._construct_pvscan(abspath, contents) + elif contents := self._is_pvreco(abspath): + pvobj = self._construct_pvreco(abspath, contents) + else: + pvobj = PvFiles(*paths) + # self.scanobj = Scan(pvobj=pvobj, reco_id=pvobj._reco_id) + super().__init__(pvobj=pvobj, reco_id=pvobj._reco_id) + + + @staticmethod + def _construct_pvscan(path: 'Path', contents: 'OrderedDict') -> 'PvScan': + ref_paths = (path.parent, path.name) + scan_id = int(path.name) if path.name.isdigit() else None + pvscan = PvScan(scan_id, ref_paths, contents) + for reco_path in (path/'pdata').iterdir(): + if contents := ScanToNifti._is_pvreco(reco_path): + reco_id = reco_path.name + pvscan.set_reco(reco_path, reco_id, contents) + return pvscan + + @staticmethod + def _construct_pvreco(path: 'Path', contents: 'OrderedDict') -> 'PvReco': + ref_paths = (path.parent, path.name) + reco_id = int(path.name) if path.name.isdigit() else None + return PvReco(None, reco_id, ref_paths, contents) + + @staticmethod + 
def _is_pvscan(path: 'Path') -> Union[bool, 'OrderedDict']: + if all([(path/f).exists() for f in ['method', 'acqp', 'pdata']]): + contents = OrderedDict(dirs=[], files=[], file_indexes=[]) + for c in path.iterdir(): + if c.is_dir(): + contents['dirs'].append(c.name) + elif c.is_file(): + contents['files'].append(c.name) + return contents + return False + + @staticmethod + def _is_pvreco(path: 'Path') -> Union[bool, 'OrderedDict']: + if all([(path/f).exists() for f in ['visu_pars', '2dseq']]): + contents = OrderedDict(dirs=[], files=[], file_indexes=[]) + for c in path.iterdir(): + if c.is_dir(): + contents['dirs'].append(c.name) + elif c.is_file(): + contents['files'].append(c.name) + return contents + return False + + def get_affine(self, reco_id:Optional[int]=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None): + return super().get_affine(scanobj=self, reco_id=reco_id, + subj_type=subj_type, subj_position=subj_position) + + def get_dataobj(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): + scale_mode = scale_mode or self.scale_mode + scale_correction = False if scale_mode == ScaleMode.HEADER else True + if reco_id: + self.set_scaninfo(reco_id) + return super().get_dataobj(scanobj=self, reco_id=reco_id, scale_correction=scale_correction) + + def get_data_dict(self, reco_id:Optional[int]=None): + if reco_id: + self.set_scaninfo(reco_id) + return super().get_data_dict(scanobj=self, reco_id=reco_id) + + def get_affine_dict(self, reco_id:Optional[int]=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): + if reco_id: + self.set_scaninfo(reco_id) + return super().get_affine_dict(scanobj=self, reco_id=reco_id, + subj_type=subj_type, subj_position=subj_position) + + def get_nifti1header(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): + scale_mode = scale_mode or self.scale_mode + return super().get_nifti1header(self, reco_id, scale_mode).get() + + def get_nifti1image(self, 
reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None, + plugin:Optional['Plugged']=None, plugin_kws:dict=None): + scale_mode = scale_mode or self.scale_mode + return super().get_nifti1image(self, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws) \ No newline at end of file diff --git a/brkraw/app/tonifti/study.py b/brkraw/app/tonifti/study.py new file mode 100644 index 0000000..3c037c3 --- /dev/null +++ b/brkraw/app/tonifti/study.py @@ -0,0 +1,65 @@ +from __future__ import annotations +from brkraw.api.data import Study +from .base import BaseMethods, ScaleMode +from .scan import ScanToNifti +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from pathlib import Path + from brkraw.api.plugin import Plugged + + +class StudyToNifti(Study, BaseMethods): + def __init__(self, path:'Path', scale_mode: Optional['ScaleMode'] = None): + super().__init__(path) + self.set_scale_mode(scale_mode) + self._cache = {} + + def get_scan(self, scan_id:int, reco_id:Optional[int] = None): + if scan_id not in self._cache.keys(): + pvscan = super().get_scan(scan_id).retrieve_pvobj() + self._cache[scan_id] = ScanToNifti(pvobj=pvscan, + reco_id=reco_id, + study_address=id(self)) + return self._cache[scan_id] + + def get_scan_pvobj(self, scan_id:int, reco_id:Optional[int] = None): + return super().get_scan(scan_id).retrieve_pvobj() + + def get_scan_analyzer(self, scan_id:int, reco_id:Optional[int]=None): + return self.get_scan(scan_id).get_scaninfo(reco_id, get_analyzer=True) + + def get_affine(self, scan_id:int, reco_id:Optional[int]=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None): + scanobj = self.get_scan(scan_id, reco_id) + return super().get_affine(scanobj=scanobj, reco_id=reco_id, + subj_type=subj_type, subj_position=subj_position) + + def get_dataobj(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None): + scale_mode = 
scale_mode or self.scale_mode + scale_correction = False if scale_mode == ScaleMode.HEADER else True + scanobj = self.get_scan(scan_id, reco_id) + return super().get_dataobj(scanobj=scanobj, reco_id=reco_id, scale_correction=scale_correction) + + def get_data_dict(self, scan_id:int, reco_id:Optional[int]=None): + scanobj = self.get_scan(scan_id, reco_id) + return super().get_data_dict(scanobj=scanobj, reco_id=reco_id) + + def get_affine_dict(self, scan_id:int, reco_id:Optional[int]=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None): + scanobj = self.get_scan(scan_id, reco_id) + return super().get_affine_dict(scanobj=scanobj, reco_id=reco_id, + subj_type=subj_type, subj_position=subj_position) + + def get_nifti1header(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None): + scale_mode = scale_mode or self.scale_mode + scanobj = self.get_scan(scan_id, reco_id) + return super().get_nifti1header(scanobj, scale_mode).get() + + def get_nifti1image(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None, + plugin:Optional['Plugged']=None, plugin_kws:dict=None): + scale_mode = scale_mode or self.scale_mode + scanobj = self.get_scan(scan_id, reco_id) + return super().get_nifti1image(scanobj, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws) + \ No newline at end of file diff --git a/brkraw/lib/recon.py b/brkraw/lib/recon.py index 9efec11..383b6ef 100644 --- a/brkraw/lib/recon.py +++ b/brkraw/lib/recon.py @@ -17,7 +17,7 @@ """ from .recoFunctions import phase_rotate, phase_corr, zero_filling -from ..api.brkobj.scan import ScanObj +from ..api.data import Scan import numpy as np import warnings @@ -26,7 +26,8 @@ def reconstruction(scanobj,process='image', **kwargs): # Ensure Scans are Image Based - ACQ_dim_desc = [scanobj.acqp.get('ACQ_dim_desc')] if isinstance(scanobj.acqp.get('ACQ_dim_desc'), str) else 
scanobj.acqp.get('ACQ_dim_desc') + acqp = scanobj.pvobj.acqp + ACQ_dim_desc = [acqp.get('ACQ_dim_desc')] if isinstance(acqp.get('ACQ_dim_desc'), str) else acqp.get('ACQ_dim_desc') if 'Spectroscopic' in ACQ_dim_desc: warnings.warn('Scan is spectroscopic') process = 'readout' @@ -40,10 +41,11 @@ def reconstruction(scanobj,process='image', **kwargs): return recoObj.reconstruct(rms=kwargs['rms'] if 'rms' in kwargs.keys() else True) class Reconstruction: - def __init__(self, scanobj:'ScanObj', reco_id:'int'=1) -> None: - self.acqp = scanobj.acqp - self.method = scanobj.method - self.fid = scanobj.get_fid() + def __init__(self, scanobj:'Scan', reco_id:'int'=1) -> None: + pvscan = scanobj.pvobj + self.acqp = pvscan.acqp + self.method = pvscan.method + self.fid = pvscan.get_fid() self.CS = True if self.method.get('PVM_EncCS')=='Yes' else False self.NI = self.acqp['NI'] self.NR = self.acqp['NR'] @@ -51,7 +53,7 @@ def __init__(self, scanobj:'ScanObj', reco_id:'int'=1) -> None: self.reco_id = reco_id self.info = scanobj.get_info(self.reco_id) self.protocol = self.info.protocol - self.reco = scanobj.get_reco(self.reco_id).reco + self.reco = pvscan.get_reco(self.reco_id).reco self.supported_protocol = any([True for i in SUPPORTED_PROTOCOLS if i in self.protocol['protocol_name'].lower()]) # 1) Convert Buffer to a np array