From d53f44c16759e60e50c3912959e2d68bd166119b Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Wed, 24 Apr 2024 13:16:57 -0400 Subject: [PATCH 01/14] [update] minor update for future deidentification support --- brkraw/api/pvobj/__init__.py | 4 ++-- brkraw/api/pvobj/parameters.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py index ed59d4f..df7c66c 100755 --- a/brkraw/api/pvobj/__init__.py +++ b/brkraw/api/pvobj/__init__.py @@ -2,6 +2,6 @@ from .pvscan import PvScan from .pvreco import PvReco from .pvfiles import PvFiles -from .parameters import Parameter +from .parameters import Parameter, Parser -__all__ = [PvDataset, PvScan, PvReco, PvFiles, Parameter] \ No newline at end of file +__all__ = [PvDataset, PvScan, PvReco, PvFiles, Parameter, Parser] \ No newline at end of file diff --git a/brkraw/api/pvobj/parameters.py b/brkraw/api/pvobj/parameters.py index 8a8386c..d50eb45 100644 --- a/brkraw/api/pvobj/parameters.py +++ b/brkraw/api/pvobj/parameters.py @@ -121,6 +121,7 @@ def _set_param(self, params, param_addr, contents): This method is intended to be called internally within the class and does not have direct usage examples. """ addr_diff = np.diff(param_addr) + self._params_key_struct = params self._contents = contents self._header = OrderedDict() self._parameters = OrderedDict() From 1a2171823fc7dd6e71e75e7064d725e47c696369 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Wed, 24 Apr 2024 14:07:42 -0400 Subject: [PATCH 02/14] [patch] slicepack helper `num_slices_each_pack` duplication in legacy --- brkraw/api/helper/slicepack.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/brkraw/api/helper/slicepack.py b/brkraw/api/helper/slicepack.py index 36b2b74..32e12fe 100644 --- a/brkraw/api/helper/slicepack.py +++ b/brkraw/api/helper/slicepack.py @@ -71,16 +71,6 @@ def _parse_legacy(self, visu_pars, fg_info): num_slices_each_pack = [int(shape[slice_fid]/num_slice_packs) for _ in range(num_slice_packs)] else: num_slices_each_pack = [shape[slice_fid]] - - slice_fg = [fg for fg in fg_info['id'] if 'slice' in fg.lower()] - if len(slice_fg): - if num_slice_packs > 1: - num_slices_each_pack.extend( - int(shape[0] / num_slice_packs) - for _ in range(num_slice_packs) - ) - else: - num_slices_each_pack.append(shape[0]) slice_distances_each_pack = [visu_pars["VisuCoreFrameThickness"] for _ in range(num_slice_packs)] return num_slice_packs, num_slices_each_pack, slice_distances_each_pack From 7d145dbaaeb11a197d42592e1820193eb0e40719 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Wed, 24 Apr 2024 16:16:34 -0400 Subject: [PATCH 03/14] [update] minor update of pvobjs --- brkraw/api/pvobj/base.py | 2 +- brkraw/api/pvobj/pvreco.py | 5 +++++ brkraw/api/pvobj/pvscan.py | 22 ++++++++++++++-------- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py index 6ed3f3b..fbee5ff 100644 --- a/brkraw/api/pvobj/base.py +++ b/brkraw/api/pvobj/base.py @@ -171,7 +171,7 @@ def get_fid(self, scan_id:Optional[int] = None): except KeyError: raise TypeError("Missing required argument: 'scan_id must be provided for {self.__class__.__name__}.") fid_files = ['fid', 'rawdata.job0'] - for fid in ['fid', 'rawdata.job0']: + for fid in fid_files: if fid in pvobj.contents['files']: return getattr(pvobj, fid) raise FileNotFoundError(f"The required file '{' or '.join(fid_files)}' does not exist. 
" diff --git a/brkraw/api/pvobj/pvreco.py b/brkraw/api/pvobj/pvreco.py index 4a7b535..9440801 100644 --- a/brkraw/api/pvobj/pvreco.py +++ b/brkraw/api/pvobj/pvreco.py @@ -1,4 +1,5 @@ import os +import warnings from .base import BaseMethods @@ -55,3 +56,7 @@ def path(self): if self.is_compressed: return path return os.path.join(*path) + + def get_fid(self): + warnings.warn(f'{self.__class__} does not support get_fid method. use Scan- or Study-level object instead') + return None \ No newline at end of file diff --git a/brkraw/api/pvobj/pvscan.py b/brkraw/api/pvobj/pvscan.py index 746bdde..72e0aa3 100644 --- a/brkraw/api/pvobj/pvscan.py +++ b/brkraw/api/pvobj/pvscan.py @@ -1,9 +1,11 @@ from __future__ import annotations import os from collections import OrderedDict -from typing import Optional +from typing import Optional, Tuple, Dict, TYPE_CHECKING from .base import BaseMethods from .pvreco import PvReco +if TYPE_CHECKING: + from pathlib import Path class PvScan(BaseMethods): """ @@ -24,14 +26,18 @@ class PvScan(BaseMethods): avail (list): A list of available items. contents (dict): A dictionary of pvscan contents. """ - def __init__(self, scan_id: Optional[int], pathes, contents=None, recos=None): + def __init__(self, + scan_id: Optional[int], + pathes: Tuple[Path, Path], + contents: Optional[Dict]=None, + recos: Optional[OrderedDict]=None): """ Initialize a Dataset object. Args: scan_id (int): The ID of the scan. pathes (tuple): A tuple containing the root path and the path. - contents (list, optional): The initial contents of the dataset. Defaults to None. + contents (dict, optional): The initial contents of the dataset. Defaults to None. recos (dict, optional): A dictionary of reco objects. Defaults to None. Attributes: @@ -48,12 +54,12 @@ def __init__(self, scan_id: Optional[int], pathes, contents=None, recos=None): self.update(contents) self._recos = OrderedDict(recos) if recos else OrderedDict() - def update(self, contents): + def update(self, contents: Dict): """ Update the contents of the dataset. Args: - contents (list): The new contents of the dataset. + contents (dict): The new contents of the dataset. Returns: None @@ -62,7 +68,7 @@ def update(self, contents): self.is_compressed = True if contents.get('file_indexes') else False self._contents = contents - def set_reco(self, path, reco_id, contents): + def set_reco(self, path: Path, reco_id: int, contents: Dict): """ Set a reco object with the specified path, ID, and contents. @@ -76,7 +82,7 @@ def set_reco(self, path, reco_id, contents): """ self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents) - def get_reco(self, reco_id): + def get_reco(self, reco_id: int): """ Get a specific reco object by ID. 
@@ -91,7 +97,7 @@ def get_reco(self, reco_id): """ return self._recos[reco_id] - def get_visu_pars(self, reco_id=None): + def get_visu_pars(self, reco_id: Optional[int] = None): if reco_id: return getattr(self.get_reco(reco_id), 'visu_pars') elif 'visu_pars' in self.contents['files']: From c6660a069b0e2c9b9b108168608b1f9c66e1833e Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 25 Apr 2024 08:28:25 -0400 Subject: [PATCH 04/14] [update] studyobj info dict hierarchy --- brkraw/api/brkobj/study.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/brkraw/api/brkobj/study.py b/brkraw/api/brkobj/study.py index d38ed19..e694d5f 100644 --- a/brkraw/api/brkobj/study.py +++ b/brkraw/api/brkobj/study.py @@ -38,8 +38,7 @@ def info(self): if header := self.header: info['header'] = header for scan_id in self.avail: - info['scans'][scan_id] = {} scanobj = self.get_scan(scan_id) - for reco_id in scanobj.avail: - info['scans'][scan_id][reco_id] = scanobj.get_info(reco_id).vars() + info['scans'][scan_id] = scanobj.info.vars() + info['scans'][scan_id]['recos'] = scanobj.avail # TODO: update reco return info From e759f7d75df23eec0e2610e8b5a432541a5786d5 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Fri, 26 Apr 2024 11:41:05 -0400 Subject: [PATCH 05/14] [refactor] Reorganize object hierarchy and rename entities --- brkraw/api/analyzer/affine.py | 2 +- brkraw/api/analyzer/base.py | 2 +- brkraw/api/analyzer/dataarray.py | 3 +- brkraw/api/analyzer/scaninfo.py | 3 +- brkraw/api/brkobj/__init__.py | 4 -- brkraw/api/data/__init__.py | 4 ++ brkraw/api/data/loader.py | 10 ++++ brkraw/api/{brkobj => data}/scan.py | 79 +++++++++++++++++----------- brkraw/api/{brkobj => data}/study.py | 8 +-- brkraw/api/helper/__init__.py | 3 +- brkraw/api/helper/cycle.py | 1 - brkraw/api/helper/diffusion.py | 52 ++++++++++++++++++ brkraw/api/helper/fid.py | 4 +- brkraw/api/helper/slicepack.py | 4 ++ brkraw/api/pvobj/pvdataset.py | 2 +- brkraw/app/tonifti/base.py | 38 ++++--------- brkraw/app/tonifti/brkraw.py | 31 +---------- brkraw/app/tonifti/converter.py | 18 ------- brkraw/app/tonifti/header.py | 5 +- brkraw/app/tonifti/loader.py | 9 ---- 20 files changed, 148 insertions(+), 134 deletions(-) delete mode 100644 brkraw/api/brkobj/__init__.py create mode 100644 brkraw/api/data/__init__.py create mode 100644 brkraw/api/data/loader.py rename brkraw/api/{brkobj => data}/scan.py (50%) rename brkraw/api/{brkobj => data}/study.py (89%) create mode 100644 brkraw/api/helper/diffusion.py delete mode 100644 brkraw/app/tonifti/converter.py delete mode 100644 brkraw/app/tonifti/loader.py diff --git a/brkraw/api/analyzer/affine.py b/brkraw/api/analyzer/affine.py index 0138667..aa1a677 100644 --- a/brkraw/api/analyzer/affine.py +++ b/brkraw/api/analyzer/affine.py @@ -5,7 +5,7 @@ from copy import copy from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: - from ..brkobj.scan import ScanInfo + from ..data.scan import ScanInfo SLICEORIENT = { diff --git a/brkraw/api/analyzer/base.py b/brkraw/api/analyzer/base.py index 0198b9a..76fa42d 100644 --- a/brkraw/api/analyzer/base.py +++ b/brkraw/api/analyzer/base.py @@ -1,3 +1,3 @@ class BaseAnalyzer: - def vars(self): + def to_dict(self): return self.__dict__ \ No newline at end of file diff --git a/brkraw/api/analyzer/dataarray.py b/brkraw/api/analyzer/dataarray.py index 89a2a12..d435c89 100644 --- a/brkraw/api/analyzer/dataarray.py +++ b/brkraw/api/analyzer/dataarray.py @@ -4,7 +4,7 @@ from copy import copy from typing import TYPE_CHECKING, Union if TYPE_CHECKING: - from 
..brkobj import ScanInfo + from ..data import ScanInfo from io import BufferedReader from zipfile import ZipExtFile @@ -33,3 +33,4 @@ def _calc_array_shape(self, infoobj: 'ScanInfo'): def get_dataarray(self): self.buffer.seek(0) return np.frombuffer(self.buffer.read(), self.dtype).reshape(self.shape, order='F') + diff --git a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py index 7d152fc..5b58af9 100644 --- a/brkraw/api/analyzer/scaninfo.py +++ b/brkraw/api/analyzer/scaninfo.py @@ -38,7 +38,7 @@ def _set_pars(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Option setattr(self, p, vals) try: visu_pars = pvobj.get_visu_pars(reco_id) - except FileNotFoundError: + except (FileNotFoundError, AttributeError): visu_pars = OrderedDict() setattr(self, 'visu_pars', visu_pars) @@ -48,6 +48,7 @@ def _parse_info(self): self.info_image = helper.Image(self).get_info() self.info_slicepack = helper.SlicePack(self).get_info() self.info_cycle = helper.Cycle(self).get_info() + self.info_diffusion = helper.Diffusion(self).get_info() if self.info_image['dim'] > 1: self.info_orientation = helper.Orientation(self).get_info() diff --git a/brkraw/api/brkobj/__init__.py b/brkraw/api/brkobj/__init__.py deleted file mode 100644 index 464667c..0000000 --- a/brkraw/api/brkobj/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .study import StudyObj -from .scan import ScanObj, ScanInfo - -__all__ = [StudyObj, ScanObj, ScanInfo] \ No newline at end of file diff --git a/brkraw/api/data/__init__.py b/brkraw/api/data/__init__.py new file mode 100644 index 0000000..5ca29bc --- /dev/null +++ b/brkraw/api/data/__init__.py @@ -0,0 +1,4 @@ +from .study import Study +from .scan import Scan, ScanInfo + +__all__ = [Study, Scan, ScanInfo] \ No newline at end of file diff --git a/brkraw/api/data/loader.py b/brkraw/api/data/loader.py new file mode 100644 index 0000000..eec80a9 --- /dev/null +++ b/brkraw/api/data/loader.py @@ -0,0 +1,10 @@ +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Tuple + from pathlib import Path + + +class Loader: + def __init__(self, *path: Path): + if len(path) == 1: + self.scan = path[0] \ No newline at end of file diff --git a/brkraw/api/brkobj/scan.py b/brkraw/api/data/scan.py similarity index 50% rename from brkraw/api/brkobj/scan.py rename to brkraw/api/data/scan.py index b99372c..f631989 100644 --- a/brkraw/api/brkobj/scan.py +++ b/brkraw/api/data/scan.py @@ -1,7 +1,7 @@ from __future__ import annotations -from typing import Optional +from typing import Optional, Union import ctypes -from ..pvobj import PvScan +from ..pvobj import PvScan, PvReco, PvFiles from ..analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer @@ -12,66 +12,85 @@ def __init__(self): @property def num_warns(self): return len(self.warns) + +class Scan: + """The Scan class is designed to interface with analyzers. -class ScanObj(PvScan): - def __init__(self, pvscan: 'PvScan', reco_id: Optional[int] = None, + Args: + pvobj (_type_): _description_ + """ + def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Optional[int] = None, loader_address: Optional[int] = None, debug: bool=False): - super().__init__(pvscan._scan_id, - (pvscan._rootpath, pvscan._path), - pvscan._contents, - pvscan._recos) - self.reco_id = reco_id self._loader_address = loader_address - self._pvscan_address = id(pvscan) + self._pvobj_address = id(pvobj) self.is_debug = debug - self.set_info() + self._set_info() + + def retrieve_pvobj(self): + if self._pvobj_address: + return
ctypes.cast(self._pvobj_address, ctypes.py_object).value - def set_info(self): + def retrieve_loader(self): + if self._loader_address: + return ctypes.cast(self._loader_address, ctypes.py_object).value + + def _set_info(self): self.info = self.get_info(self.reco_id) def get_info(self, reco_id:Optional[int] = None, get_analyzer:bool = False): infoobj = ScanInfo() - pvscan = self.retrieve_pvscan() - analysed = ScanInfoAnalyzer(pvscan, reco_id, self.is_debug) + pvobj = self.retrieve_pvobj() + analysed = ScanInfoAnalyzer(pvobj, reco_id, self.is_debug) if get_analyzer: return analysed for attr_name in dir(analysed): if 'info_' in attr_name: attr_vals = getattr(analysed, attr_name) + if warns:= attr_vals.pop('warns', None): + infoobj.warns.extend(warns) setattr(infoobj, attr_name.replace('info_', ''), attr_vals) - if attr_vals and attr_vals['warns']: - infoobj.warns.extend(attr_vals['warns']) return infoobj - def get_affine_info(self, reco_id:Optional[int] = None): + def get_affine_analyzer(self, reco_id:Optional[int] = None): if reco_id: info = self.get_info(reco_id) else: info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) return AffineAnalyzer(info) - def get_data_info(self, reco_id: Optional[int] = None): - reco_id = reco_id or self.avail[0] - recoobj = self.get_reco(reco_id) - fileobj = recoobj.get_2dseq() + def get_datarray_analyzer(self, reco_id: Optional[int] = None): + pvobj = self.retrieve_pvobj() + if isinstance(pvobj, PvScan): + reco_id = reco_id or pvobj.avail[0] + recoobj = pvobj.get_reco(reco_id) + fileobj = recoobj.get_2dseq() + else: + fileobj = pvobj.get_2dseq() info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) return DataArrayAnalyzer(info, fileobj) def get_affine(self, reco_id:Optional[int] = None, subj_type:Optional[str] = None, subj_position:Optional[str] = None): - return self.get_affine_info(reco_id).get_affine(subj_type, subj_position) + return self.get_affine_analyzer(reco_id).get_affine(subj_type, subj_position) def get_dataarray(self, reco_id: Optional[int] = None): - return self.get_data_info(reco_id).get_dataarray() + return self.get_datarray_analyzer(reco_id).get_dataarray() - def retrieve_pvscan(self): - if self._pvscan_address: - return ctypes.cast(self._pvscan_address, ctypes.py_object).value + @property + def pvobj(self): + return self.retrieve_pvobj() - def retrieve_loader(self): - if self._loader_address: - return ctypes.cast(self._loader_address, ctypes.py_object).value - \ No newline at end of file + @property + def about_scan(self): + return self.info.to_dict() + + @property + def about_affine(self): + return self.get_affine_analyzer().to_dict() + + @property + def about_dataarray(self): + return self.get_datarray_analyzer().to_dict() \ No newline at end of file diff --git a/brkraw/api/brkobj/study.py b/brkraw/api/data/study.py similarity index 89% rename from brkraw/api/brkobj/study.py rename to brkraw/api/data/study.py index e694d5f..18edee2 100644 --- a/brkraw/api/brkobj/study.py +++ b/brkraw/api/data/study.py @@ -1,9 +1,9 @@ from __future__ import annotations from typing import Dict from ..pvobj import PvDataset -from .scan import ScanObj +from .scan import Scan -class StudyObj(PvDataset): +class Study(PvDataset): def __init__(self, path): super().__init__(path) self._parse_header() @@ -13,8 +13,8 @@ def get_scan(self, scan_id, reco_id=None, debug=False): Get a scan object by scan ID. 
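The `retrieve_pvobj`/`retrieve_loader` accessors above lean on a CPython implementation detail: `id()` returns the object's memory address, and `ctypes.cast(address, ctypes.py_object).value` dereferences that address without holding a reference. A minimal sketch of the pattern; it assumes the referent is still alive when dereferenced, since no reference count is taken:

    import ctypes

    class Owner:
        pass

    owner = Owner()
    address = id(owner)  # CPython: id() is the object's address
    recovered = ctypes.cast(address, ctypes.py_object).value
    assert recovered is owner

Storing the address instead of the object keeps `Scan` from forming a reference cycle with its parent, at the cost of undefined behavior if the parent is garbage-collected first.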
""" pvscan = super().get_scan(scan_id) - return ScanObj(pvscan=pvscan, reco_id=reco_id, - loader_address=id(self), debug=debug) + return Scan(pvobj=pvscan, reco_id=reco_id, + loader_address=id(self), debug=debug) def _parse_header(self): if not self.contents or 'subject' not in self.contents['files']: diff --git a/brkraw/api/helper/__init__.py b/brkraw/api/helper/__init__.py index c2aed94..b74c10c 100644 --- a/brkraw/api/helper/__init__.py +++ b/brkraw/api/helper/__init__.py @@ -6,6 +6,7 @@ from .cycle import Cycle from .orientation import Orientation, to_matvec, from_matvec, rotate_affine from .fid import FID +from .diffusion import Diffusion -__all__ = [Protocol, FID, FrameGroup, DataArray, Image, SlicePack, Cycle, Orientation, +__all__ = [Protocol, FID, FrameGroup, DataArray, Image, SlicePack, Cycle, Orientation, Diffusion, to_matvec, from_matvec, rotate_affine] \ No newline at end of file diff --git a/brkraw/api/helper/cycle.py b/brkraw/api/helper/cycle.py index 60a3dd9..05162ec 100644 --- a/brkraw/api/helper/cycle.py +++ b/brkraw/api/helper/cycle.py @@ -1,6 +1,5 @@ from __future__ import annotations import re -import numpy as np from typing import TYPE_CHECKING from .base import BaseHelper from .frame_group import FrameGroup diff --git a/brkraw/api/helper/diffusion.py b/brkraw/api/helper/diffusion.py new file mode 100644 index 0000000..585c3f8 --- /dev/null +++ b/brkraw/api/helper/diffusion.py @@ -0,0 +1,52 @@ +from __future__ import annotations +import numpy as np +from typing import TYPE_CHECKING +from .base import BaseHelper +if TYPE_CHECKING: + from ..analyzer import ScanInfoAnalyzer + + +class Diffusion(BaseHelper): + """requires method to parse parameter related to the Diffusion Imaging + + Dependencies: + acqp + visu_pars + + Args: + BaseHelper (_type_): _description_ + """ + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + method = analobj.method + + self.bvals = None + self.bvecs = None + if method: + self._set_params(method) + else: + self._warn("Failed to fetch 'bvals' and 'bvecs' information because the 'method' file is missing from 'analobj'.") + + def _set_params(self, method): + bvals = method.get('PVM_DwEffBval') + bvecs = method.get('PVM_DwGradVec') + if bvals is not None: + self.bvals = np.array([bvals]) if np.size(bvals) < 2 else np.array(bvals) + if bvecs is not None: + self.bvecs = self._L2_norm(bvecs.T) + + @staticmethod + def _L2_norm(bvecs): + # Normalize bvecs + bvecs_axis = 0 + bvecs_L2_norm = np.atleast_1d(np.linalg.norm(bvecs, 2, bvecs_axis)) + bvecs_L2_norm[bvecs_L2_norm < 1e-15] = 1 + bvecs = bvecs / np.expand_dims(bvecs_L2_norm, bvecs_axis) + return bvecs + + def get_info(self): + return { + 'bvals': self.bvals, + 'bvecs': self.bvecs, + 'warns': self.warns + } \ No newline at end of file diff --git a/brkraw/api/helper/fid.py b/brkraw/api/helper/fid.py index 9baf320..295ac2a 100644 --- a/brkraw/api/helper/fid.py +++ b/brkraw/api/helper/fid.py @@ -1,13 +1,13 @@ from __future__ import annotations import numpy as np from typing import TYPE_CHECKING -from .base import BaseHelper, is_all_element_same, BYTEORDER, WORDTYPE +from .base import BaseHelper, BYTEORDER, WORDTYPE if TYPE_CHECKING: from ..analyzer import ScanInfoAnalyzer class FID(BaseHelper): - """requires visu_pars and aqcp to pars parameter related to the dtype of binary files + """requires visu_pars and aqcp to parse parameter related to the dtype of binary files Dependencies: acqp diff --git a/brkraw/api/helper/slicepack.py b/brkraw/api/helper/slicepack.py index 
32e12fe..d659578 100644 --- a/brkraw/api/helper/slicepack.py +++ b/brkraw/api/helper/slicepack.py @@ -12,6 +12,7 @@ class SlicePack(BaseHelper): Dependencies: FrameGroup Image + method visu_pars Args: @@ -19,6 +20,7 @@ class SlicePack(BaseHelper): """ def __init__(self, analobj: 'ScanInfoAnalyzer'): super().__init__() + method = analobj.method visu_pars = analobj.visu_pars fg_info = analobj.get("info_frame_group") or FrameGroup(analobj).get_info() @@ -45,6 +47,7 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): self.num_slice_packs = num_slice_packs self.num_slices_each_pack = num_slices_each_pack self.slice_distances_each_pack = slice_distances_each_pack + self.slice_order_scheme = method.get("PVM_ObjOrderScheme") disk_slice_order = visu_pars.get("VisuCoreDiskSliceOrder") or 'normal' self.is_reverse = 'reverse' in disk_slice_order @@ -109,6 +112,7 @@ def get_info(self): 'num_slices_each_pack': self.num_slices_each_pack, 'slice_distances_each_pack': self.slice_distances_each_pack, 'slice_distance_unit': 'mm', + 'slice_order_scheme': self.slice_order_scheme, 'reverse_slice_order': self.is_reverse, 'warns': self.warns } \ No newline at end of file diff --git a/brkraw/api/pvobj/pvdataset.py b/brkraw/api/pvobj/pvdataset.py index c3fd46e..8f08df2 100755 --- a/brkraw/api/pvobj/pvdataset.py +++ b/brkraw/api/pvobj/pvdataset.py @@ -76,7 +76,7 @@ def _check_dataset_validity(self, path): else: raise ValueError(f"The path '{self._path}' does not meet the required criteria.") - def _construct(self): # sourcery skip: low-code-quality + def _construct(self): """ Constructs the object by organizing the contents. diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 03777f1..15200c5 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -4,12 +4,13 @@ import numpy as np from io import BufferedReader from zipfile import ZipExtFile -from brkraw.api.brkobj import ScanObj, ScanInfo +from brkraw.api.data import Scan, ScanInfo from brkraw.api.analyzer import ScanInfoAnalyzer, DataArrayAnalyzer, AffineAnalyzer from .header import Header -from typing import TYPE_CHECKING, Optional, Union +from typing import TYPE_CHECKING if TYPE_CHECKING: from pathlib import Path + from typing import Optional, Union XYZT_UNITS = \ @@ -45,7 +46,7 @@ def _set_info(self): self.analysed = analysed @staticmethod - def get_dataobj(scanobj:Union['ScanInfo','ScanObj'], + def get_dataobj(scanobj:Union['ScanInfo','Scan'], fileobj:Union['BufferedReader', 'ZipExtFile', None] = None, reco_id:Optional[int] = None, scale_correction:bool = False): @@ -62,17 +63,17 @@ def get_dataobj(scanobj:Union['ScanInfo','ScanObj'], return dataobj @staticmethod - def get_affine(scanobj:Union['ScanInfo', 'ScanObj'], reco_id:Optional[int] = None, + def get_affine(scanobj:Union['ScanInfo', 'Scan'], reco_id:Optional[int] = None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): return BaseMethods.get_affine_dict(scanobj, reco_id, subj_type, subj_position)['affine'] @staticmethod - def get_data_dict(scanobj:Union['ScanInfo', 'ScanObj'], + def get_data_dict(scanobj:Union['ScanInfo', 'Scan'], fileobj:Union['BufferedReader', 'ZipExtFile'] = None, reco_id:Optional[int] = None): - if isinstance(scanobj, ScanObj): + if isinstance(scanobj, Scan): data_info = scanobj.get_data_info(reco_id) - elif isinstance(scanobj, ScanInfo) and isinstance(scanobj, Union[BufferedReader, ZipExtFile]): + elif isinstance(scanobj, ScanInfo) and isinstance(fileobj, Union[BufferedReader, ZipExtFile]): data_info = 
DataArrayAnalyzer(scanobj, fileobj) else: raise TypeError( @@ -94,9 +95,9 @@ def get_data_dict(scanobj:Union['ScanInfo', 'ScanObj'], } @staticmethod - def get_affine_dict(scanobj:Union['ScanInfo','ScanObj'], reco_id:Optional[int] = None, + def get_affine_dict(scanobj:Union['ScanInfo','Scan'], reco_id:Optional[int] = None, subj_type:Optional[str] = None, subj_position:Optional[str] = None): - if isinstance(scanobj, ScanObj): + if isinstance(scanobj, Scan): affine_info = scanobj.get_affine_info(reco_id) elif isinstance(scanobj, ScanInfo): affine_info = AffineAnalyzer(scanobj) @@ -114,25 +115,6 @@ def get_affine_dict(scanobj:Union['ScanInfo','ScanObj'], reco_id:Optional[int] = "subj_type": subj_type, "subj_position": subj_position } - - @staticmethod - def get_bdata(analobj:'ScanInfoAnalyzer'): - """Extract, format, and return diffusion bval and bvec""" - bvals = np.array(analobj.method.get('PVM_DwEffBval')) - bvecs = np.array(analobj.method.get('PVM_DwGradVec').T) - # Correct for single b-vals - if np.size(bvals) < 2: - bvals = np.array([bvals]) - # Normalize bvecs - bvecs_axis = 0 - bvecs_L2_norm = np.atleast_1d(np.linalg.norm(bvecs, 2, bvecs_axis)) - bvecs_L2_norm[bvecs_L2_norm < 1e-15] = 1 - bvecs = bvecs / np.expand_dims(bvecs_L2_norm, bvecs_axis) - return bvals, bvecs - - @staticmethod - def get_bids_metadata(scaninfo:'ScanInfo', bids_recipe:Optional['Path']=None): - print(isinstance(scaninfo, ScanInfo), bids_recipe) @staticmethod def get_nifti1header(scaninfo:'ScanInfo', scale_mode:Optional['ScaleMode']=None): diff --git a/brkraw/app/tonifti/brkraw.py b/brkraw/app/tonifti/brkraw.py index 7b359be..19d3051 100644 --- a/brkraw/app/tonifti/brkraw.py +++ b/brkraw/app/tonifti/brkraw.py @@ -1,12 +1,12 @@ from __future__ import annotations -from brkraw.api.brkobj import StudyObj +from brkraw.api.data import Study from .base import BaseMethods, ScaleMode from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from pathlib import Path -class BrkrawToNifti(StudyObj, BaseMethods): +class BrkrawToNifti(Study, BaseMethods): def __init__(self, path:'Path', scale_mode: Optional['ScaleMode'] = None): """_summary_ @@ -121,30 +121,3 @@ def get_nifti1header(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:O scale_mode = scale_mode or self.scale_mode scaninfo = self.get_scan(scan_id).get_info(reco_id) return super().get_nifti1header(scaninfo, scale_mode).get() - - def get_bdata(self, scan_id:int): - """_summary_ - - Args: - scan_id (int): _description_ - - Returns: - _type_: _description_ - """ - analobj = self.get_scan_analyzer(scan_id) - return super().get_bdata(analobj) - - def get_bids_metadata(self, scan_id:int, reco_id:Optional[int]=None, bids_recipe=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - bids_recipe (_type_, optional): _description_. Defaults to None. 
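The b-value/b-vector handling deleted from `base.py` above survives in the `helper/diffusion.py` module added by this same patch: single b-values are wrapped into an array, and each b-vector is rescaled to unit L2 norm, with near-zero vectors left untouched to avoid division by zero. A standalone sketch of that normalization:

    import numpy as np

    bvecs = np.array([[3.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0]]).T           # shape (3, n_directions)
    norms = np.atleast_1d(np.linalg.norm(bvecs, 2, 0))
    norms[norms < 1e-15] = 1                        # guard the b=0 volume
    unit_bvecs = bvecs / np.expand_dims(norms, 0)   # columns now have unit length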
- - Returns: - _type_: _description_ - """ - analobj = self.get_scan_analyzer(scan_id, reco_id) - return super().get_bids_metadata(analobj, bids_recipe) - \ No newline at end of file diff --git a/brkraw/app/tonifti/converter.py b/brkraw/app/tonifti/converter.py deleted file mode 100644 index 22cc79d..0000000 --- a/brkraw/app/tonifti/converter.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import annotations - -class Converter: - """ - Data converter to NifTi format, - provide variouse converting mode - the Default is use default - in case of plugin needed, search available plugin (by name of plugin) and run it - the plugin functionality will be implemented using modules in plugin app - - sordino2nii will be first example case of plugin - """ - def __init__(self): - pass - - def save_to(self, output_path): - pass - \ No newline at end of file diff --git a/brkraw/app/tonifti/header.py b/brkraw/app/tonifti/header.py index 3611cf2..48003af 100644 --- a/brkraw/app/tonifti/header.py +++ b/brkraw/app/tonifti/header.py @@ -3,7 +3,7 @@ from nibabel.nifti1 import Nifti1Header from typing import TYPE_CHECKING, Union if TYPE_CHECKING: - from brkraw.api.brkobj import ScanInfo + from brkraw.api.data import ScanInfo from .base import ScaleMode @@ -18,8 +18,7 @@ def __init__(self, scaninfo:'ScanInfo', scale_mode:Union['ScaleMode', int]): self._set_time_step() def _set_sliceorder(self): - self.info.slicepack - slice_order_scheme = self.info.method.get("PVM_ObjOrderScheme") + slice_order_scheme = self.info.slicepack['slice_order_scheme'] if slice_order_scheme == 'User_defined_slice_scheme' or slice_order_scheme: slice_code = 0 elif slice_order_scheme == 'Sequential': diff --git a/brkraw/app/tonifti/loader.py b/brkraw/app/tonifti/loader.py deleted file mode 100644 index 539a070..0000000 --- a/brkraw/app/tonifti/loader.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations -from .brkraw import BrkrawToNifti -from .pvscan import PvScanToNifti -from .pvreco import PvRecoToNifti -from .pvfiles import PvFilesToNifti - -class Loader: - def __init__(self, *args, **kwargs): - pass \ No newline at end of file From 57dd69e62489467451fd56aae5848a14d613d8d1 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 27 Apr 2024 15:22:17 -0400 Subject: [PATCH 06/14] [refactor] object hierarchy and cleaning --- brkraw/api/__init__.py | 4 +- brkraw/api/analyzer/scaninfo.py | 6 ++ brkraw/api/data/loader.py | 10 --- brkraw/api/data/scan.py | 41 +++++------ brkraw/api/data/study.py | 17 +++-- brkraw/api/helper/fid.py | 3 +- brkraw/api/pvobj/base.py | 11 +-- brkraw/api/pvobj/pvdataset.py | 15 ++-- brkraw/api/pvobj/pvfiles.py | 5 +- brkraw/app/tonifti/__init__.py | 8 --- brkraw/app/tonifti/base.py | 109 +++++++++++----------------- brkraw/app/tonifti/brkraw.py | 123 -------------------------------- brkraw/app/tonifti/pvfiles.py | 16 ----- brkraw/app/tonifti/pvreco.py | 19 ----- brkraw/app/tonifti/pvscan.py | 19 ----- brkraw/app/tonifti/scan.py | 108 ++++++++++++++++++++++++++++ brkraw/app/tonifti/study.py | 62 ++++++++++++++++ 17 files changed, 266 insertions(+), 310 deletions(-) delete mode 100644 brkraw/api/data/loader.py delete mode 100644 brkraw/app/tonifti/brkraw.py delete mode 100644 brkraw/app/tonifti/pvfiles.py delete mode 100644 brkraw/app/tonifti/pvreco.py delete mode 100644 brkraw/app/tonifti/pvscan.py create mode 100644 brkraw/app/tonifti/scan.py create mode 100644 brkraw/app/tonifti/study.py diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index 5f9ce81..3e44148 100755 --- 
a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -1,4 +1,4 @@ -from .brkobj import StudyObj +from .data import Study from ..config import ConfigManager -__all__ = [StudyObj, ConfigManager] \ No newline at end of file +__all__ = [Study, ConfigManager] \ No newline at end of file diff --git a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py index 5b58af9..14cf84a 100644 --- a/brkraw/api/analyzer/scaninfo.py +++ b/brkraw/api/analyzer/scaninfo.py @@ -36,6 +36,12 @@ def _set_pars(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Option except AttributeError: vals = OrderedDict() setattr(self, p, vals) + try: + fid_buffer = pvobj.get_fid() + except (FileNotFoundError, AttributeError): + fid_buffer = None + setattr(self, 'fid_buffer', fid_buffer) + try: visu_pars = pvobj.get_visu_pars(reco_id) except (FileNotFoundError, AttributeError): diff --git a/brkraw/api/data/loader.py b/brkraw/api/data/loader.py deleted file mode 100644 index eec80a9..0000000 --- a/brkraw/api/data/loader.py +++ /dev/null @@ -1,10 +0,0 @@ -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from typing import Tuple - from pathlib import Path - - -class Loader: - def __init__(self, *path: Path): - if len(path) == 1: - self.scan = path[0] \ No newline at end of file diff --git a/brkraw/api/data/scan.py b/brkraw/api/data/scan.py index f631989..b42c961 100644 --- a/brkraw/api/data/scan.py +++ b/brkraw/api/data/scan.py @@ -21,25 +21,25 @@ class Scan: pvobj (_type_): _description_ """ def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Optional[int] = None, - loader_address: Optional[int] = None, debug: bool=False): + study_address: Optional[int] = None, debug: bool=False): self.reco_id = reco_id - self._loader_address = loader_address + self._study_address = study_address self._pvobj_address = id(pvobj) self.is_debug = debug - self._set_info() + self.set_scaninfo() def retrieve_pvobj(self): if self._pvobj_address: return ctypes.cast(self._pvobj_address, ctypes.py_object).value - def retrieve_loader(self): - if self._loader_address: - return ctypes.cast(self._loader_address, ctypes.py_object).value + def retrieve_study(self): + if self._study_address: + return ctypes.cast(self._study_address, ctypes.py_object).value - def _set_info(self): - self.info = self.get_info(self.reco_id) + def set_scaninfo(self): + self.info = self.get_scaninfo(self.reco_id) - def get_info(self, reco_id:Optional[int] = None, get_analyzer:bool = False): + def get_scaninfo(self, reco_id:Optional[int] = None, get_analyzer:bool = False): infoobj = ScanInfo() pvobj = self.retrieve_pvobj() analysed = ScanInfoAnalyzer(pvobj, reco_id, self.is_debug) @@ -56,28 +56,21 @@ def get_info(self, reco_id:Optional[int] = None, get_analyzer:bool = False): def get_affine_analyzer(self, reco_id:Optional[int] = None): if reco_id: - info = self.get_info(reco_id) + info = self.get_scaninfo(reco_id) else: - info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) + info = self.info if hasattr(self, 'info') else self.get_scaninfo(self.reco_id) return AffineAnalyzer(info) def get_datarray_analyzer(self, reco_id: Optional[int] = None): + reco_id = reco_id if reco_id else self.reco_id pvobj = self.retrieve_pvobj() - if isinstance(pvobj, PvScan): - reco_id = reco_id or pvobj.avail[0] - recoobj = pvobj.get_reco(reco_id) - fileobj = recoobj.get_2dseq() - else: - fileobj = pvobj.get_2dseq() - info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) + fileobj = pvobj.get_2dseq(reco_id=reco_id) + 
info = self.info if hasattr(self, 'info') else self.get_scaninfo(reco_id) return DataArrayAnalyzer(info, fileobj) - def get_affine(self, reco_id:Optional[int] = None, - subj_type:Optional[str] = None, subj_position:Optional[str] = None): - return self.get_affine_analyzer(reco_id).get_affine(subj_type, subj_position) - - def get_dataarray(self, reco_id: Optional[int] = None): - return self.get_datarray_analyzer(reco_id).get_dataarray() + @property + def avail(self): + return self.pvobj.avail @property def pvobj(self): diff --git a/brkraw/api/data/study.py b/brkraw/api/data/study.py index 18edee2..78d5624 100644 --- a/brkraw/api/data/study.py +++ b/brkraw/api/data/study.py @@ -2,9 +2,10 @@ from typing import Dict from ..pvobj import PvDataset from .scan import Scan +from pathlib import Path class Study(PvDataset): - def __init__(self, path): + def __init__(self, path: Path): super().__init__(path) self._parse_header() @@ -14,7 +15,7 @@ def get_scan(self, scan_id, reco_id=None, debug=False): """ pvscan = super().get_scan(scan_id) return Scan(pvobj=pvscan, reco_id=reco_id, - loader_address=id(self), debug=debug) + study_address=id(self), debug=debug) def _parse_header(self): if not self.contents or 'subject' not in self.contents['files']: @@ -30,15 +31,17 @@ def _parse_header(self): def avail(self): return super().avail - @property + @property #TODO def info(self): """output all analyzed information""" info = {'header': None, 'scans': {}} if header := self.header: info['header'] = header - for scan_id in self.avail: - scanobj = self.get_scan(scan_id) - info['scans'][scan_id] = scanobj.info.vars() - info['scans'][scan_id]['recos'] = scanobj.avail # TODO: update reco + # for scan_id in self.avail: + # scanobj = self.get_scan(scan_id) + # info['scans'][scan_id] = {'protocol_name': scanobj.info.protocol['protocol_name'], + # 'recos': {}} + # for reco_id in scanobj.avail: + # info['scans'][scan_id]['recos'][reco_id] = scanobj.get_info(reco_id).frame_group return info diff --git a/brkraw/api/helper/fid.py b/brkraw/api/helper/fid.py index 295ac2a..58461fd 100644 --- a/brkraw/api/helper/fid.py +++ b/brkraw/api/helper/fid.py @@ -25,10 +25,9 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): byte_order = f'{acqp["BYTORDA"]}Endian' self.dtype = np.dtype(f'{BYTEORDER[byte_order]}{WORDTYPE[word_type]}') else: - self.fid_dtype = None + self.dtype = None self._warn("Failed to fetch 'fid_dtype' information because the 'acqp' file is missing from 'analobj'.") - def get_info(self): return { 'dtype': self.dtype, diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py index fbee5ff..5a84e72 100644 --- a/brkraw/api/pvobj/base.py +++ b/brkraw/api/pvobj/base.py @@ -3,8 +3,10 @@ import zipfile from collections import OrderedDict from collections import defaultdict -from .parameters import Parameter from typing import Optional +from pathlib import Path +from .parameters import Parameter + class BaseMethods: """ @@ -27,7 +29,7 @@ class BaseMethods: _contents = None @staticmethod - def _fetch_dir(path): + def _fetch_dir(path: 'Path'): """Searches for directories and files in a given directory and returns the directory structure. Args: @@ -41,7 +43,7 @@ def _fetch_dir(path): - 'file_indexes': An empty list. 
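A condensed sketch of the walk pattern `_fetch_dir` (continued just below) implements; the empty-string key for the root directory is an inference from the docstring, not verified against the full source:

    import os
    from collections import OrderedDict
    from pathlib import Path

    def fetch_dir(path: Path) -> OrderedDict:
        contents = OrderedDict()
        abspath = path.absolute()
        for dirpath, dirnames, filenames in os.walk(abspath):
            relpath = os.path.relpath(os.path.normpath(dirpath), abspath)
            key = '' if relpath == '.' else relpath  # assumed root key
            contents[key] = {'dirs': dirnames, 'files': filenames,
                             'file_indexes': []}
        return contents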
""" contents = OrderedDict() - abspath = os.path.abspath(path) + abspath = path.absolute() for dirpath, dirnames, filenames in os.walk(abspath): normalized_dirpath = os.path.normpath(dirpath) relative_path = os.path.relpath(normalized_dirpath, abspath) @@ -49,7 +51,7 @@ def _fetch_dir(path): return contents @staticmethod - def _fetch_zip(path): + def _fetch_zip(path: 'Path'): """Searches for files in a zip file and returns the directory structure and file information. Args: @@ -199,7 +201,6 @@ def get_2dseq(self, scan_id:Optional[int] = None, reco_id:Optional[int] = None): raise FileNotFoundError("The required file '2dseq' does not exist. " "Please check the dataset and ensure the file is in the expected location.") - @staticmethod def _is_binary(fileobj, bytes=512): block = fileobj.read(bytes) diff --git a/brkraw/api/pvobj/pvdataset.py b/brkraw/api/pvobj/pvdataset.py index 8f08df2..578a990 100755 --- a/brkraw/api/pvobj/pvdataset.py +++ b/brkraw/api/pvobj/pvdataset.py @@ -1,7 +1,7 @@ -import os import re import zipfile from collections import OrderedDict +from pathlib import Path from .base import BaseMethods from .pvscan import PvScan @@ -23,7 +23,7 @@ class PvDataset(BaseMethods): avail (list): A list of available scans. contents (dict): A dictionary of pvdataset contents. """ - def __init__(self, path, debug=False): + def __init__(self, path: Path, debug: bool=False): """ Initialize the object with the given path and optional debug flag. @@ -47,7 +47,7 @@ def __init__(self, path, debug=False): self._construct() # internal method - def _check_dataset_validity(self, path): + def _check_dataset_validity(self, path: Path): """ Checks the validity of a given dataset path. @@ -64,13 +64,14 @@ def _check_dataset_validity(self, path): Returns: None """ - self._path = os.path.abspath(path) - if not os.path.exists(self._path): + path = Path(path) if isinstance(path, str) else path + self._path = path.absolute() + if not self._path.exists(): raise FileNotFoundError(f"The path '{self._path}' does not exist.") - if os.path.isdir(self._path): + if self._path.is_dir(): self._contents = self._fetch_dir(self._path) self.is_compressed = False - elif os.path.isfile(self._path) and zipfile.is_zipfile(self._path): + elif self._path.is_file() and zipfile.is_zipfile(self._path): self._contents = self._fetch_zip(self._path) self.is_compressed = True else: diff --git a/brkraw/api/pvobj/pvfiles.py b/brkraw/api/pvobj/pvfiles.py index 23503ca..931cb5b 100644 --- a/brkraw/api/pvobj/pvfiles.py +++ b/brkraw/api/pvobj/pvfiles.py @@ -1,8 +1,9 @@ import os from .base import BaseMethods +from pathlib import Path class PvFiles(BaseMethods): - def __init__(self, *files): + def __init__(self, *files: Path): """_summary_ Args: @@ -11,7 +12,7 @@ def __init__(self, *files): """ self.update(*files) - def update(self, *files): + def update(self, *files: Path): self._path = [os.path.abspath(f) for f in files if os.path.exists(f)] self._contents = {"files": [os.path.basename(f) for f in self._path], "dirs": [], diff --git a/brkraw/app/tonifti/__init__.py b/brkraw/app/tonifti/__init__.py index c678c14..a004236 100644 --- a/brkraw/app/tonifti/__init__.py +++ b/brkraw/app/tonifti/__init__.py @@ -4,14 +4,6 @@ """ import argparse from brkraw import __version__ -from .loader import Loader - -__all__ = [Loader] - -def load(*args, **kwargs): - """Load data in Facade design pattern - """ - Loader() def main(): """main script allows convert brkraw diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 15200c5..6217cbf 
100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -1,16 +1,15 @@ from __future__ import annotations -from enum import Enum import warnings import numpy as np -from io import BufferedReader -from zipfile import ZipExtFile -from brkraw.api.data import Scan, ScanInfo -from brkraw.api.analyzer import ScanInfoAnalyzer, DataArrayAnalyzer, AffineAnalyzer -from .header import Header +import nibabel as nib +from enum import Enum from typing import TYPE_CHECKING +from .header import Header + if TYPE_CHECKING: - from pathlib import Path - from typing import Optional, Union + from typing import Optional + from brkraw.api.data import Scan + from brkraw.api.plugin import Plugged XYZT_UNITS = \ @@ -24,33 +23,17 @@ class ScaleMode(Enum): class BaseMethods: - info, fileobj = (None, None) - def set_scale_mode(self, scale_mode:Optional[ScaleMode]=None): if scale_mode: self.scale_mode = scale_mode else: self.scale_mode = ScaleMode.HEADER - - def _set_info(self): - analysed = ScanInfoAnalyzer(self) - infoobj = ScanInfo() - - for attr_name in dir(analysed): - if 'info_' in attr_name: - attr_vals = getattr(analysed, attr_name) - setattr(infoobj, attr_name.replace('info_', ''), attr_vals) - if attr_vals and attr_vals['warns']: - infoobj.warns.extend(attr_vals['warns']) - self.info = infoobj - self.analysed = analysed @staticmethod - def get_dataobj(scanobj:Union['ScanInfo','Scan'], - fileobj:Union['BufferedReader', 'ZipExtFile', None] = None, + def get_dataobj(scanobj:'Scan', reco_id:Optional[int] = None, scale_correction:bool = False): - data_dict = BaseMethods.get_data_dict(scanobj, fileobj, reco_id) + data_dict = BaseMethods.get_data_dict(scanobj, reco_id) dataobj = data_dict['data_array'] if scale_correction: try: @@ -63,52 +46,34 @@ def get_dataobj(scanobj:Union['ScanInfo','Scan'], return dataobj @staticmethod - def get_affine(scanobj:Union['ScanInfo', 'Scan'], reco_id:Optional[int] = None, + def get_affine(scanobj:'Scan', reco_id:Optional[int] = None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): return BaseMethods.get_affine_dict(scanobj, reco_id, subj_type, subj_position)['affine'] @staticmethod - def get_data_dict(scanobj:Union['ScanInfo', 'Scan'], - fileobj:Union['BufferedReader', 'ZipExtFile'] = None, + def get_data_dict(scanobj:'Scan', reco_id:Optional[int] = None): - if isinstance(scanobj, Scan): - data_info = scanobj.get_data_info(reco_id) - elif isinstance(scanobj, ScanInfo) and isinstance(fileobj, Union[BufferedReader, ZipExtFile]): - data_info = DataArrayAnalyzer(scanobj, fileobj) - else: - raise TypeError( - "Unsupported type for 'scanobj'. Expected 'scanobj' to be an instance of 'ScanObj' or " - "'ScanInfo' combined with either 'BufferedReader' or 'ZipExtFile'. Please check the type of 'scanobj' " - "and ensure it matches the expected conditions." 
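The rewritten `get_data_dict` just below normalizes the slice axis: wherever 'slice' sits in the reconstructed array, it is swapped into axis 2 so the result matches the (x, y, slice, ...) layout NIfTI writers expect. A self-contained sketch of that swap:

    import numpy as np

    axis_labels = ['x', 'y', 'cycle', 'slice']
    dataarray = np.zeros((64, 64, 5, 12))
    slice_axis = axis_labels.index('slice') if 'slice' in axis_labels else 2
    if slice_axis != 2:
        dataarray = np.swapaxes(dataarray, slice_axis, 2)
        axis_labels[slice_axis], axis_labels[2] = axis_labels[2], axis_labels[slice_axis]
    assert dataarray.shape == (64, 64, 12, 5) and axis_labels[2] == 'slice'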
- ) - axis_labels = data_info.shape_desc - dataarray = data_info.get_dataarray() + datarray_analyzer = scanobj.get_datarray_analyzer(reco_id) + axis_labels = datarray_analyzer.shape_desc + dataarray = datarray_analyzer.get_dataarray() slice_axis = axis_labels.index('slice') if 'slice' in axis_labels else 2 if slice_axis != 2: dataarray = np.swapaxes(dataarray, slice_axis, 2) axis_labels[slice_axis], axis_labels[2] = axis_labels[2], axis_labels[slice_axis] return { 'data_array': dataarray, - 'data_slope': data_info.slope, - 'data_offset': data_info.offset, + 'data_slope': datarray_analyzer.slope, + 'data_offset': datarray_analyzer.offset, 'axis_labels': axis_labels } @staticmethod - def get_affine_dict(scanobj:Union['ScanInfo','Scan'], reco_id:Optional[int] = None, + def get_affine_dict(scanobj:'Scan', reco_id:Optional[int] = None, subj_type:Optional[str] = None, subj_position:Optional[str] = None): - if isinstance(scanobj, Scan): - affine_info = scanobj.get_affine_info(reco_id) - elif isinstance(scanobj, ScanInfo): - affine_info = AffineAnalyzer(scanobj) - else: - raise TypeError( - "Unsupported type for 'scanobj'. Expected 'scanobj' to be an instance of 'ScanObj' or 'ScanInfo'. " - "Please check the type of 'scanobj' and ensure it matches the expected conditions." - ) - subj_type = subj_type or affine_info.subj_type - subj_position = subj_position or affine_info.subj_position - affine = affine_info.get_affine(subj_type, subj_position) + affine_analyzer = scanobj.get_affine_analyzer(reco_id) + subj_type = subj_type or affine_analyzer.subj_type + subj_position = subj_position or affine_analyzer.subj_position + affine = affine_analyzer.get_affine(subj_type, subj_position) return { "num_slicepacks": len(affine) if isinstance(affine, list) else 1, "affine": affine, @@ -117,14 +82,26 @@ def get_affine_dict(scanobj:Union['ScanInfo','Scan'], reco_id:Optional[int] = No } @staticmethod - def get_nifti1header(scaninfo:'ScanInfo', scale_mode:Optional['ScaleMode']=None): + def get_nifti1header(scanobj:'Scan', reco_id:Optional[int] = None, + scale_mode:Optional['ScaleMode']=None): + if reco_id: + scanobj.set_scaninfo(reco_id) scale_mode = scale_mode or ScaleMode.HEADER - return Header(scaninfo, scale_mode).get() - - # @staticmethod - # def get_nifti1image(self, scan_id:int, reco_id:int|None=None, - # subj_type:str|None=None, subj_position:str|None=None, - # scale_mode:ScaleMode = ScaleMode.HEADER): - # smode = scale_mode if scale_mode == ScaleMode.APPLY else ScaleMode.NONE - # data_dict = self.get_dataobj(scan_id, reco_id, smode) - # affine_dict = self.get_affine(scan_id, reco_id, subj_type, subj_position) \ No newline at end of file + return Header(scanobj.info, scale_mode).get() + + @staticmethod + def get_nifti1image(scanobj:'Scan', reco_id:Optional[int] = None, + scale_mode:Optional['ScaleMode']=None, + subj_type:Optional[str] = None, subj_position:Optional[str] = None, + plugin:Optional['Plugged']=None, plugin_kws:dict=None): + if plugin and plugin.type == 'tonifti': + with plugin(scanobj, subj_type=subj_type, subj_position=subj_position, **plugin_kws) as p: + dataobj = p.get_dataobj() + affine = p.get_affine() + header = p.get_nifti1header() + else: + scale_mode = scale_mode or ScaleMode.HEADER + dataobj = BaseMethods.get_dataobj(scanobj, reco_id, bool(scale_mode)) + affine = BaseMethods.get_affine(scanobj, reco_id, subj_type, subj_position) + header = BaseMethods.get_nifti1header(scanobj, scale_mode) + return nib.Nifti1Image(dataobj, affine, header) \ No newline at end of file diff --git 
a/brkraw/app/tonifti/brkraw.py b/brkraw/app/tonifti/brkraw.py deleted file mode 100644 index 19d3051..0000000 --- a/brkraw/app/tonifti/brkraw.py +++ /dev/null @@ -1,123 +0,0 @@ -from __future__ import annotations -from brkraw.api.data import Study -from .base import BaseMethods, ScaleMode -from typing import TYPE_CHECKING, Optional -if TYPE_CHECKING: - from pathlib import Path - - -class BrkrawToNifti(Study, BaseMethods): - def __init__(self, path:'Path', scale_mode: Optional['ScaleMode'] = None): - """_summary_ - - Args: - path (Path): _description_ - scale_mode (ScaleMode , None, optional): _description_. Defaults to None. - """ - - super().__init__(path) - self.set_scale_mode(scale_mode) - self._cache = {} - - def get_scan(self, scan_id:int): - """_summary_ - - Args: - scan_id (int): _description_ - - Returns: - _type_: _description_ - """ - if scan_id not in self._cache.keys(): - self._cache[scan_id] = super().get_scan(scan_id) - return self._cache[scan_id] - - def get_scan_analyzer(self, scan_id:int, reco_id:Optional[int]=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - - Returns: - _type_: _description_ - """ - return self.get_scan(scan_id).get_info(reco_id, get_analyzer=True) - - def get_affine(self, scan_id:int, reco_id:Optional[int]=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - subj_type (str , None, optional): _description_. Defaults to None. - subj_position (str , None, optional): _description_. Defaults to None. - - Returns: - _type_: _description_ - """ - scanobj = self.get_scan(scan_id) - return super().get_affine(scanobj=scanobj, reco_id=reco_id, subj_type=subj_type, subj_position=subj_position) - - def get_dataobj(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - scale_mode (ScaleMode; , None, optional): _description_. Defaults to None. - - Raises: - ValueError: _description_ - - Returns: - _type_: _description_ - """ - scale_mode = scale_mode or self.scale_mode - scale_correction = False if scale_mode == ScaleMode.HEADER else True - scanobj = self.get_scan(scan_id) - return super().get_dataobj(scanobj=scanobj, fileobj=None, reco_id=reco_id, scale_correction=scale_correction) - - def get_data_dict(self, scan_id:int, reco_id:Optional[int]=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - - Returns: - _type_: _description_ - """ - scanobj = self.get_scan(scan_id) - return super().get_data_dict(scanobj=scanobj, reco_id=reco_id) - - def get_affine_dict(self, scan_id:int, reco_id:Optional[int]=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - subj_type (str , None, optional): _description_. Defaults to None. - subj_position (str , None, optional): _description_. Defaults to None. 
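The `scale_mode` flag these deleted wrappers passed through decides whether reco scaling is baked into the returned array (`ScaleMode.APPLY`) or left to the NIfTI header's slope/intercept fields (`ScaleMode.HEADER`). A sketch of the relation, assuming the usual linear convention behind `data_slope`/`data_offset`:

    import numpy as np

    def apply_reco_scaling(dataobj, slope, offset):
        # assumption: stored values map linearly to physical values
        return np.asarray(dataobj) * slope + offset

    scaled = apply_reco_scaling(np.array([0, 100, 200], dtype=np.int16), 0.5, 0.0)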
- - Returns: - _type_: _description_ - """ - scanobj = self.get_scan(scan_id) - return super().get_affine_dict(scanobj=scanobj, reco_id=reco_id, - subj_type=subj_type, subj_position=subj_position) - - def get_nifti1header(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): - """_summary_ - - Args: - scan_id (int): _description_ - reco_id (int , None, optional): _description_. Defaults to None. - scale_mode (ScaleMode , None, optional): _description_. Defaults to None. - - Returns: - _type_: _description_ - """ - scale_mode = scale_mode or self.scale_mode - scaninfo = self.get_scan(scan_id).get_info(reco_id) - return super().get_nifti1header(scaninfo, scale_mode).get() diff --git a/brkraw/app/tonifti/pvfiles.py b/brkraw/app/tonifti/pvfiles.py deleted file mode 100644 index baf617e..0000000 --- a/brkraw/app/tonifti/pvfiles.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import annotations -from pathlib import Path -from brkraw.api.pvobj import PvFiles -from .base import BaseMethods, ScaleMode - -class PvFilesToNifti(PvFiles, BaseMethods): - def __init__(self, *files): - """_summary_ - - Args: - data_path (str): path of '2dseq' file in reco_dir - pars_path (str): path of 'visu_pars' file in reco_dir - """ - super.__init__(*files) - self._set_info() - diff --git a/brkraw/app/tonifti/pvreco.py b/brkraw/app/tonifti/pvreco.py deleted file mode 100644 index bbab6f9..0000000 --- a/brkraw/app/tonifti/pvreco.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations -import os -from pathlib import Path -from brkraw.api.pvobj import PvReco -from .base import BaseMethods - - -class PvRecoToNifti(PvReco, BaseMethods): - def __init__(self, path: 'Path'): - """_summary_ - - Args: - data_path (str): path of '2dseq' file in reco_dir - pars_path (str): path of 'visu_pars' file in reco_dir - """ - rootpath, reco_path = os.path.split(path) - _, dirs, files = os.walk(path) - super.__init__(None, reco_path, (rootpath, reco_path), {'dirs':dirs, 'files':files}) - self._set_info() diff --git a/brkraw/app/tonifti/pvscan.py b/brkraw/app/tonifti/pvscan.py deleted file mode 100644 index ffd939e..0000000 --- a/brkraw/app/tonifti/pvscan.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations -import os -from pathlib import Path -from brkraw.api.pvobj import PvScan -from .base import BaseMethods, ScaleMode - - -class PvScanToNifti(PvScan, BaseMethods): - def __init__(self, path:'Path'): - """_summary_ - - Args: - data_path (str): path of '2dseq' file in reco_dir - pars_path (str): path of 'visu_pars' file in reco_dir - """ - rootpath, scan_path = os.path.split(path) - _, dirs, files = os.walk(path) - super.__init__(None, scan_path, (rootpath, scan_path), {'dirs':dirs, 'files':files}) - self._set_info() diff --git a/brkraw/app/tonifti/scan.py b/brkraw/app/tonifti/scan.py new file mode 100644 index 0000000..db8d46c --- /dev/null +++ b/brkraw/app/tonifti/scan.py @@ -0,0 +1,108 @@ +from __future__ import annotations +from pathlib import Path +from brkraw.api.data import Scan +from brkraw.api.pvobj import PvScan, PvReco, PvFiles +from collections import OrderedDict +from typing import TYPE_CHECKING +from .base import BaseMethods, ScaleMode +if TYPE_CHECKING: + from typing import Union, Optional + from brkraw.api.plugin import Plugged + + +class ScanToNifti(Scan, BaseMethods): + def __init__(self, *paths: Path, **kwargs): + """_summary_ + + Args: + data_path (str): path of '2dseq' file in reco_dir + pars_path (str): path of 'visu_pars' file in reco_dir + """ + if 
len(paths) == 0: + super().__init__(**kwargs) + if len(paths) == 1 and paths[0].is_dir(): + abspath = paths[0].absolute() + if contents := self._is_pvscan(abspath): + pvobj = self._construct_pvscan(abspath, contents) + elif contents := self._is_pvreco(abspath): + pvobj = self._construct_pvreco(abspath, contents) + else: + pvobj = PvFiles(*paths) + super().__init__(pvobj=pvobj, reco_id=pvobj._reco_id) + + @staticmethod + def _construct_pvscan(path: 'Path', contents: 'OrderedDict') -> 'PvScan': + ref_paths = (path.parent, path.name) + scan_id = int(path.name) if path.name.isdigit() else None + pvscan = PvScan(scan_id, ref_paths, contents) + for reco_path in (path/'pdata').iterdir(): + if contents := ScanToNifti._is_pvreco(reco_path): + reco_id = reco_path.name + pvscan.set_reco(reco_path, reco_id, contents) + return pvscan + + @staticmethod + def _construct_pvreco(path: 'Path', contents: 'OrderedDict') -> 'PvReco': + ref_paths = (path.parent, path.name) + reco_id = int(path.name) if path.name.isdigit() else None + return PvReco(None, reco_id, ref_paths, contents) + + @staticmethod + def _is_pvscan(path: 'Path') -> Union[bool, 'OrderedDict']: + if all([(path/f).exists() for f in ['method', 'acqp', 'pdata']]): + contents = OrderedDict(dirs=[], files=[], file_indexes=[]) + for c in path.iterdir(): + if c.is_dir(): + contents['dirs'].append(c.name) + elif c.is_file(): + contents['files'].append(c.name) + return contents + return False + + @staticmethod + def _is_pvreco(path: 'Path') -> Union[bool, 'OrderedDict']: + if all([(path/f).exists() for f in ['visu_pars', '2dseq']]): + contents = OrderedDict(dirs=[], files=[], file_indexes=[]) + for c in path.iterdir(): + if c.is_dir(): + contents['dirs'].append(c.name) + elif c.is_file(): + contents['files'].append(c.name) + return contents + return False + + def get_affine(self, reco_id:Optional[int]=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None): + return super().get_affine(scanobj=self, reco_id=reco_id, + subj_type=subj_type, subj_position=subj_position) + + def get_dataobj(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): + scale_mode = scale_mode or self.scale_mode + scale_correction = False if scale_mode == ScaleMode.HEADER else True + if reco_id: + self._set_scaninfo(reco_id) + return super().get_dataobj(scanobj=self, reco_id=reco_id, scale_correction=scale_correction) + + def get_data_dict(self, reco_id:Optional[int]=None): + if reco_id: + self._set_scaninfo(reco_id) + return super().get_data_dict(scanobj=self, reco_id=reco_id) + + def get_affine_dict(self, reco_id:Optional[int]=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): + if reco_id: + self._set_scaninfo(reco_id) + return super().get_affine_dict(scanobj=self, reco_id=reco_id, + subj_type=subj_type, subj_position=subj_position) + + def get_nifti1header(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): + scale_mode = scale_mode or self.scale_mode + if reco_id: + self._set_scaninfo(reco_id) + return super().get_nifti1header(self, scale_mode).get() + + def get_nifti1image(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None, + plugin:Optional['Plugged']=None, plugin_kws:dict=None): + scale_mode = scale_mode or self.scale_mode + scanobj = self.get_scan(scan_id, reco_id) + return super().get_nifti1image(scanobj, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws) \ No newline at end of 
file diff --git a/brkraw/app/tonifti/study.py b/brkraw/app/tonifti/study.py new file mode 100644 index 0000000..f3772b9 --- /dev/null +++ b/brkraw/app/tonifti/study.py @@ -0,0 +1,62 @@ +from __future__ import annotations +from brkraw.api.data import Study +from .base import BaseMethods, ScaleMode +from .scan import ScanToNifti +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from pathlib import Path + from brkraw.api.plugin import Plugged + + +class StudyToNifti(Study, BaseMethods): + def __init__(self, path:'Path', scale_mode: Optional['ScaleMode'] = None): + super().__init__(path) + self.set_scale_mode(scale_mode) + self._cache = {} + + def get_scan(self, scan_id:int, reco_id:Optional[int] = None): + if scan_id not in self._cache.keys(): + pvscan = super().get_scan(scan_id).retrieve_pvobj() + self._cache[scan_id] = ScanToNifti(pvobj=pvscan, + reco_id=reco_id, + study_address=id(self)) + return self._cache[scan_id] + + def get_scan_analyzer(self, scan_id:int, reco_id:Optional[int]=None): + return self.get_scan(scan_id).get_scaninfo(reco_id, get_analyzer=True) + + def get_affine(self, scan_id:int, reco_id:Optional[int]=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None): + scanobj = self.get_scan(scan_id, reco_id) + return super().get_affine(scanobj=scanobj, reco_id=reco_id, + subj_type=subj_type, subj_position=subj_position) + + def get_dataobj(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None): + scale_mode = scale_mode or self.scale_mode + scale_correction = False if scale_mode == ScaleMode.HEADER else True + scanobj = self.get_scan(scan_id, reco_id) + return super().get_dataobj(scanobj=scanobj, reco_id=reco_id, scale_correction=scale_correction) + + def get_data_dict(self, scan_id:int, reco_id:Optional[int]=None): + scanobj = self.get_scan(scan_id, reco_id) + return super().get_data_dict(scanobj=scanobj, reco_id=reco_id) + + def get_affine_dict(self, scan_id:int, reco_id:Optional[int]=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None): + scanobj = self.get_scan(scan_id, reco_id) + return super().get_affine_dict(scanobj=scanobj, reco_id=reco_id, + subj_type=subj_type, subj_position=subj_position) + + def get_nifti1header(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None): + scale_mode = scale_mode or self.scale_mode + scanobj = self.get_scan(scan_id, reco_id) + return super().get_nifti1header(scanobj, scale_mode).get() + + def get_nifti1image(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None, + subj_type:Optional[str]=None, subj_position:Optional[str]=None, + plugin:Optional['Plugged']=None, plugin_kws:dict=None): + scale_mode = scale_mode or self.scale_mode + scanobj = self.get_scan(scan_id, reco_id) + return super().get_nifti1image(scanobj, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws) + \ No newline at end of file From f82092a9afffe47857c1a85df0ec7a66f68c60b4 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Mon, 29 Apr 2024 17:27:36 -0400 Subject: [PATCH 07/14] [hotpatch] __all__ elements, obj -> str --- brkraw/api/__init__.py | 2 +- brkraw/api/analyzer/__init__.py | 2 +- brkraw/api/data/__init__.py | 2 +- brkraw/api/helper/__init__.py | 5 +++-- brkraw/api/plugin/__init__.py | 5 +++++ brkraw/api/pvobj/__init__.py | 2 +- brkraw/app/tonifti/__init__.py | 6 +++++- 7 files changed, 17 insertions(+), 7 deletions(-) create mode 100644 brkraw/api/plugin/__init__.py diff --git a/brkraw/api/__init__.py 
b/brkraw/api/__init__.py index 3e44148..261d4a1 100755 --- a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -1,4 +1,4 @@ from .data import Study from ..config import ConfigManager -__all__ = [Study, ConfigManager] \ No newline at end of file +__all__ = ['Study', 'ConfigManager'] \ No newline at end of file diff --git a/brkraw/api/analyzer/__init__.py b/brkraw/api/analyzer/__init__.py index 4fed832..dce03d3 100644 --- a/brkraw/api/analyzer/__init__.py +++ b/brkraw/api/analyzer/__init__.py @@ -3,4 +3,4 @@ from .affine import AffineAnalyzer from .dataarray import DataArrayAnalyzer -__all__ = [BaseAnalyzer, ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer] \ No newline at end of file +__all__ = ['BaseAnalyzer', 'ScanInfoAnalyzer', 'AffineAnalyzer', 'DataArrayAnalyzer'] \ No newline at end of file diff --git a/brkraw/api/data/__init__.py b/brkraw/api/data/__init__.py index 5ca29bc..2b9f2b4 100644 --- a/brkraw/api/data/__init__.py +++ b/brkraw/api/data/__init__.py @@ -1,4 +1,4 @@ from .study import Study from .scan import Scan, ScanInfo -__all__ = [Study, Scan, ScanInfo] \ No newline at end of file +__all__ = ['Study', 'Scan', 'ScanInfo'] \ No newline at end of file diff --git a/brkraw/api/helper/__init__.py b/brkraw/api/helper/__init__.py index b74c10c..8df058c 100644 --- a/brkraw/api/helper/__init__.py +++ b/brkraw/api/helper/__init__.py @@ -8,5 +8,6 @@ from .fid import FID from .diffusion import Diffusion -__all__ = [Protocol, FID, FrameGroup, DataArray, Image, SlicePack, Cycle, Orientation, Diffusion, - to_matvec, from_matvec, rotate_affine] \ No newline at end of file +__all__ = ['Protocol', 'FID', 'FrameGroup', 'DataArray', + 'Image', 'SlicePack', 'Cycle', 'Orientation', 'Diffusion', + 'to_matvec', 'from_matvec', 'rotate_affine'] \ No newline at end of file diff --git a/brkraw/api/plugin/__init__.py b/brkraw/api/plugin/__init__.py new file mode 100644 index 0000000..a43766b --- /dev/null +++ b/brkraw/api/plugin/__init__.py @@ -0,0 +1,5 @@ +from .aggregator import Aggregator +from .plugged import Plugged +from .preset import Preset + +__all__ = ['Aggregator', 'Plugged', 'Preset'] \ No newline at end of file diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py index df7c66c..76c26cf 100755 --- a/brkraw/api/pvobj/__init__.py +++ b/brkraw/api/pvobj/__init__.py @@ -4,4 +4,4 @@ from .pvfiles import PvFiles from .parameters import Parameter, Parser -__all__ = [PvDataset, PvScan, PvReco, PvFiles, Parameter, Parser] \ No newline at end of file +__all__ = ['PvDataset', 'PvScan', 'PvReco', 'PvFiles', 'Parameter', 'Parser'] \ No newline at end of file diff --git a/brkraw/app/tonifti/__init__.py b/brkraw/app/tonifti/__init__.py index a004236..31b1e38 100644 --- a/brkraw/app/tonifti/__init__.py +++ b/brkraw/app/tonifti/__init__.py @@ -2,8 +2,12 @@ dependency: bids, plugin """ -import argparse from brkraw import __version__ +from .base import BasePlugin, PvScan, PvReco, PvFiles +from .study import StudyToNifti, ScanToNifti +import argparse + +__all__ = ['BasePlugin', 'StudyToNifti', 'ScanToNifti', 'PvScan', 'PvReco', 'PvFiles'] def main(): """main script allows convert brkraw From 3adab0e8d491b50b1eec5fede3a6ac664360d69d Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Mon, 29 Apr 2024 17:30:02 -0400 Subject: [PATCH 08/14] [refactor] api improve accessibility and functionality --- brkraw/api/data/scan.py | 11 +++++---- brkraw/api/data/study.py | 1 - brkraw/api/pvobj/base.py | 43 ++++++++++++++++++++++++++++++----- brkraw/api/pvobj/pvdataset.py | 2 +- 4 files changed, 45 
insertions(+), 12 deletions(-) diff --git a/brkraw/api/data/scan.py b/brkraw/api/data/scan.py index b42c961..811be91 100644 --- a/brkraw/api/data/scan.py +++ b/brkraw/api/data/scan.py @@ -2,6 +2,7 @@ from typing import Optional, Union import ctypes from ..pvobj import PvScan, PvReco, PvFiles +from ..pvobj.base import BaseBufferHandler from ..analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer @@ -14,7 +15,7 @@ def num_warns(self): return len(self.warns) -class Scan: +class Scan(BaseBufferHandler): """The Scan class design to interface with analyzer, Args: @@ -36,8 +37,9 @@ def retrieve_study(self): if self._study_address: return ctypes.cast(self._study_address, ctypes.py_object).value - def set_scaninfo(self): - self.info = self.get_scaninfo(self.reco_id) + def set_scaninfo(self, reco_id:Optional[int] = None): + reco_id = reco_id or self.reco_id + self.info = self.get_scaninfo(reco_id) def get_scaninfo(self, reco_id:Optional[int] = None, get_analyzer:bool = False): infoobj = ScanInfo() @@ -62,9 +64,10 @@ def get_affine_analyzer(self, reco_id:Optional[int] = None): return AffineAnalyzer(info) def get_datarray_analyzer(self, reco_id: Optional[int] = None): - reco_id = reco_id if reco_id else self.reco_id + reco_id = reco_id or self.reco_id pvobj = self.retrieve_pvobj() fileobj = pvobj.get_2dseq(reco_id=reco_id) + self._buffers.append(fileobj) # register the buffer so BaseBufferHandler.close() can release it info = self.info if hasattr(self, 'info') else self.get_scaninfo(reco_id) return DataArrayAnalyzer(info, fileobj) diff --git a/brkraw/api/data/study.py b/brkraw/api/data/study.py index 78d5624..aa120c7 100644 --- a/brkraw/api/data/study.py +++ b/brkraw/api/data/study.py @@ -1,5 +1,4 @@ from __future__ import annotations -from typing import Dict from ..pvobj import PvDataset from .scan import Scan from pathlib import Path diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py index 5a84e72..6576be1 100644 --- a/brkraw/api/pvobj/base.py +++ b/brkraw/api/pvobj/base.py @@ -3,10 +3,32 @@ import zipfile from collections import OrderedDict from collections import defaultdict -from typing import Optional +from typing import TYPE_CHECKING from pathlib import Path from .parameters import Parameter +if TYPE_CHECKING: + from typing import Optional, Union, List + from io import BufferedReader + from zipfile import ZipExtFile + + +class BaseBufferHandler: + _buffers: Union[List[BufferedReader], List[ZipExtFile]] = [] + def close(self): + if self._buffers: + for b in self._buffers: + if not b.closed: + b.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + # Return False to propagate exceptions, if any + return False + class BaseMethods: """ @@ -28,6 +50,9 @@ class BaseMethods: _rootpath = None _contents = None + def isinstance(self, name): + return self.__class__.__name__ == name + @staticmethod def _fetch_dir(path: 'Path'): """Searches for directories and files in a given directory and returns the directory structure. 
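For orientation, here is a minimal usage sketch of the BaseBufferHandler context-manager protocol introduced above; the FidReader subclass and the 'fid' file name are hypothetical, for illustration only:

    from brkraw.api.pvobj.base import BaseBufferHandler

    class FidReader(BaseBufferHandler):
        def open_fid(self, path):
            buf = open(path, 'rb')
            self._buffers.append(buf)  # buffers registered here are closed by close()
            return buf

    with FidReader() as reader:       # __enter__ returns the handler itself
        fid = reader.open_fid('fid')  # placeholder file name
        data = fid.read()
    # on exit, close() walks _buffers and closes any buffer still open
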
@@ -47,7 +72,9 @@ def _fetch_dir(path: 'Path'): for dirpath, dirnames, filenames in os.walk(abspath): normalized_dirpath = os.path.normpath(dirpath) relative_path = os.path.relpath(normalized_dirpath, abspath) - contents[relative_path] = {'dirs': dirnames, 'files': filenames, 'file_indexes': []} + file_sizes = [os.path.getsize(os.path.join(dirpath, f)) for f in filenames] + contents[relative_path] = {'dirs': dirnames, 'files': filenames, + 'file_indexes': [], 'file_sizes': file_sizes} return contents @staticmethod @@ -65,12 +92,13 @@ def _fetch_zip(path: 'Path'): - 'file_indexes': A list of file indexes. """ with zipfile.ZipFile(path) as zip_file: - contents = defaultdict(lambda: {'dirs': set(), 'files': [], 'file_indexes': []}) + contents = defaultdict(lambda: {'dirs': set(), 'files': [], 'file_indexes': [], 'file_sizes': []}) for i, item in enumerate(zip_file.infolist()): if not item.is_dir(): dirpath, filename = os.path.split(item.filename) contents[dirpath]['files'].append(filename) contents[dirpath]['file_indexes'].append(i) + contents[dirpath]['file_sizes'].append(item.file_size) while dirpath: dirpath, dirname = os.path.split(dirpath) if dirname: @@ -158,9 +186,11 @@ def __getattr__(self, key): fileobj = self._open_as_fileobject(file.pop()) if self._is_binary(fileobj): return fileobj - par = Parameter(fileobj.read().decode('UTF-8').split('\n'), + string_list = fileobj.read().decode('UTF-8').split('\n') + fileobj.close() + par = Parameter(string_list, name=key, scan_id=self._scan_id, reco_id=self._reco_id) - return par if par.is_parameter() else fileobj.read().decode('UTF-8').split('\n') + return par if par.is_parameter() else string_list raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{key}'") @property @@ -183,7 +213,8 @@ def get_2dseq(self, scan_id:Optional[int] = None, reco_id:Optional[int] = None): try: if scan_id and hasattr(self, 'get_scan'): pvobj = self.get_scan(scan_id).get_reco(reco_id) - elif reco_id and hasattr(self, 'get_reco'): + elif hasattr(self, 'get_reco'): + reco_id = reco_id or sorted(self.avail).pop(0) pvobj = self.get_reco(reco_id) else: pvobj = self diff --git a/brkraw/api/pvobj/pvdataset.py b/brkraw/api/pvobj/pvdataset.py index 578a990..13b3c9e 100755 --- a/brkraw/api/pvobj/pvdataset.py +++ b/brkraw/api/pvobj/pvdataset.py @@ -65,7 +65,7 @@ def _check_dataset_validity(self, path: Path): None """ path = Path(path) if isinstance(path, str) else path - self._path = path.absolute() + self._path: Path = path.absolute() if not self._path.exists(): raise FileNotFoundError(f"The path '{self._path}' does not exist.") if self._path.is_dir(): From 0ea03a620689e48c7742635f8dd3290c4607a7b3 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Mon, 29 Apr 2024 17:31:12 -0400 Subject: [PATCH 09/14] [refactor] app.tonifti improve functionality, including minor updates --- brkraw/app/tonifti/base.py | 38 ++++++++++++++++++++++++++---------- brkraw/app/tonifti/header.py | 8 ++++---- brkraw/app/tonifti/scan.py | 38 +++++++++++++++++++----------------- brkraw/app/tonifti/study.py | 3 +++ 4 files changed, 55 insertions(+), 32 deletions(-) diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 6217cbf..064715c 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -3,12 +3,14 @@ import numpy as np import nibabel as nib from enum import Enum -from typing import TYPE_CHECKING +from pathlib import Path from .header import Header - +from brkraw.api.pvobj.base import BaseBufferHandler +from brkraw.api.pvobj import PvScan, 
PvReco, PvFiles +from brkraw.api.data import Scan +from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Optional - from brkraw.api.data import Scan + from typing import Optional, Union from brkraw.api.plugin import Plugged @@ -22,7 +24,7 @@ class ScaleMode(Enum): HEADER = 2 -class BaseMethods: +class BaseMethods(BaseBufferHandler): def set_scale_mode(self, scale_mode:Optional[ScaleMode]=None): if scale_mode: self.scale_mode = scale_mode @@ -95,13 +97,29 @@ def get_nifti1image(scanobj:'Scan', reco_id:Optional[int] = None, subj_type:Optional[str] = None, subj_position:Optional[str] = None, plugin:Optional['Plugged']=None, plugin_kws:dict=None): if plugin and plugin.type == 'tonifti': - with plugin(scanobj, subj_type=subj_type, subj_position=subj_position, **plugin_kws) as p: - dataobj = p.get_dataobj() - affine = p.get_affine() + with plugin(scanobj, **plugin_kws) as p: + dataobj = p.get_dataobj(bool(scale_mode)) + affine = p.get_affine(subj_type=subj_type, subj_position=subj_position) header = p.get_nifti1header() else: scale_mode = scale_mode or ScaleMode.HEADER dataobj = BaseMethods.get_dataobj(scanobj, reco_id, bool(scale_mode)) affine = BaseMethods.get_affine(scanobj, reco_id, subj_type, subj_position) - header = BaseMethods.get_nifti1header(scanobj, scale_mode) - return nib.Nifti1Image(dataobj, affine, header) \ No newline at end of file + header = BaseMethods.get_nifti1header(scanobj, reco_id, scale_mode) + return nib.Nifti1Image(dataobj, affine, header) + + +class BasePlugin(Scan, BaseMethods): + def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], verbose: bool=False, **kwargs): + super().__init__(pvobj, **kwargs) + self.verbose = verbose + + def close(self): + super().close() + self.clear_cache() + + def clear_cache(self): + for buffer in self._buffers: + file_path = Path(buffer.name) + if file_path.exists(): + file_path.unlink() diff --git a/brkraw/app/tonifti/header.py b/brkraw/app/tonifti/header.py index 48003af..15ee638 100644 --- a/brkraw/app/tonifti/header.py +++ b/brkraw/app/tonifti/header.py @@ -10,7 +10,7 @@ class Header: def __init__(self, scaninfo:'ScanInfo', scale_mode:Union['ScaleMode', int]): self.info = scaninfo - self.scale_mode = int(scale_mode) + self.scale_mode = int(scale_mode.value) self.nifti1header = Nifti1Header() self.nifti1header.default_x_flip = False self._set_scale_params() @@ -42,7 +42,7 @@ def _set_sliceorder(self): self.nifti1header['slice_code'] = slice_code def _set_time_step(self): - if self.info.cycle['num_cycle'] > 1: + if self.info.cycle['num_cycles'] > 1: time_step = self.info.cycle['time_step'] self.nifti1header['pixdim'][4] = time_step num_slices = self.info.slicepack['num_slices_each_pack'][0] @@ -50,8 +50,8 @@ def _set_time_step(self): def _set_scale_params(self): if self.scale_mode == 2: - self.nifti1header['scl_slope'] = self.info.dataarray['2dseq_slope'] - self.nifti1header['scl_inter'] = self.info.dataarray['2dseq_offset'] + self.nifti1header['scl_slope'] = self.info.dataarray['slope'] + self.nifti1header['scl_inter'] = self.info.dataarray['offset'] def get(self): return self.nifti1header \ No newline at end of file diff --git a/brkraw/app/tonifti/scan.py b/brkraw/app/tonifti/scan.py index db8d46c..989e6d4 100644 --- a/brkraw/app/tonifti/scan.py +++ b/brkraw/app/tonifti/scan.py @@ -11,24 +11,29 @@ class ScanToNifti(Scan, BaseMethods): - def __init__(self, *paths: Path, **kwargs): + def __init__(self, *paths: Path, scale_mode: Optional[ScaleMode]=None, **kwargs): """_summary_ Args: data_path (str): path of 
'2dseq' file in reco_dir pars_path (str): path of 'visu_pars' file in reco_dir """ + self.scale_mode = scale_mode if len(paths) == 0: super().__init__(**kwargs) - if len(paths) == 1 and paths[0].is_dir(): - abspath = paths[0].absolute() - if contents := self._is_pvscan(abspath): - pvobj = self._construct_pvscan(abspath, contents) - elif contents := self._is_pvreco(abspath): - pvobj = self._construct_pvreco(abspath, contents) else: - pvobj = PvFiles(*paths) - super().__init__(pvobj=pvobj, reco_id=pvobj._reco_id) + + if len(paths) == 1 and paths[0].is_dir(): + abspath = paths[0].absolute() + if contents := self._is_pvscan(abspath): + pvobj = self._construct_pvscan(abspath, contents) + elif contents := self._is_pvreco(abspath): + pvobj = self._construct_pvreco(abspath, contents) + else: + pvobj = PvFiles(*paths) + # self.scanobj = Scan(pvobj=pvobj, reco_id=pvobj._reco_id) + super().__init__(pvobj=pvobj, reco_id=pvobj._reco_id) + @staticmethod def _construct_pvscan(path: 'Path', contents: 'OrderedDict') -> 'PvScan': @@ -80,29 +85,26 @@ def get_dataobj(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode scale_mode = scale_mode or self.scale_mode scale_correction = False if scale_mode == ScaleMode.HEADER else True if reco_id: - self._set_scaninfo(reco_id) + self.set_scaninfo(reco_id) return super().get_dataobj(scanobj=self, reco_id=reco_id, scale_correction=scale_correction) def get_data_dict(self, reco_id:Optional[int]=None): if reco_id: - self._set_scaninfo(reco_id) + self.set_scaninfo(reco_id) return super().get_data_dict(scanobj=self, reco_id=reco_id) def get_affine_dict(self, reco_id:Optional[int]=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None): if reco_id: - self._set_scaninfo(reco_id) + self.set_scaninfo(reco_id) return super().get_affine_dict(scanobj=self, reco_id=reco_id, subj_type=subj_type, subj_position=subj_position) def get_nifti1header(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None): scale_mode = scale_mode or self.scale_mode - if reco_id: - self._set_scaninfo(reco_id) - return super().get_nifti1header(self, scale_mode).get() + return super().get_nifti1header(self, reco_id, scale_mode).get() - def get_nifti1image(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None, + def get_nifti1image(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None, plugin:Optional['Plugged']=None, plugin_kws:dict=None): scale_mode = scale_mode or self.scale_mode - scanobj = self.get_scan(scan_id, reco_id) - return super().get_nifti1image(scanobj, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws) \ No newline at end of file + return super().get_nifti1image(self, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws) \ No newline at end of file diff --git a/brkraw/app/tonifti/study.py b/brkraw/app/tonifti/study.py index f3772b9..3c037c3 100644 --- a/brkraw/app/tonifti/study.py +++ b/brkraw/app/tonifti/study.py @@ -23,6 +23,9 @@ def get_scan(self, scan_id:int, reco_id:Optional[int] = None): study_address=id(self)) return self._cache[scan_id] + def get_scan_pvobj(self, scan_id:int, reco_id:Optional[int] = None): + return super().get_scan(scan_id).retrieve_pvobj() + def get_scan_analyzer(self, scan_id:int, reco_id:Optional[int]=None): return self.get_scan(scan_id).get_scaninfo(reco_id, get_analyzer=True) From 4cfc1813ed2fda7b5b1d17247418c84f242234e4 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Mon, 29 Apr 
2024 17:32:04 -0400 Subject: [PATCH 10/14] [new feature] recipe helper for BIDS tool --- brkraw/api/helper/recipe.py | 97 +++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 brkraw/api/helper/recipe.py diff --git a/brkraw/api/helper/recipe.py b/brkraw/api/helper/recipe.py new file mode 100644 index 0000000..ae91f5e --- /dev/null +++ b/brkraw/api/helper/recipe.py @@ -0,0 +1,97 @@ +from __future__ import annotations +import re +import warnings +from collections import OrderedDict +from typing import TYPE_CHECKING +from .base import BaseHelper +if TYPE_CHECKING: + from typing import Optional, Dict, List, Any + from brkraw.api.analyzer import BaseAnalyzer + +class Recipe(BaseHelper): + def __init__(self, target: 'BaseAnalyzer', recipe: dict, legacy: bool = False, + startup_scripts:Optional[List[str]] = None): + self.target = target + self.recipe = recipe + self.results = OrderedDict() + self.backward_comp = legacy + self.startup_scripts = startup_scripts + self._parse_recipe() + + def _parse_recipe(self): + for key, value in self.recipe.items(): + self.results[key] = self._eval_value(value) + + def _eval_value(self, value: Any): + if isinstance(value, str): + value = self._process_str(value) + elif isinstance(value, list): + value = self._process_list(value) + elif isinstance(value, dict): + value = self._process_dict(value) + return value + + def _legacy_parser(self, param_key: str): + for pars in ['acqp', 'method', 'visu_pars']: + value = getattr(self.target, pars).get(param_key) + if value is not None: + return value + return param_key + + def _process_str(self, str_obj: str): + if self.backward_comp: + return self._legacy_parser(str_obj) + ptrn = r'(?P<attr>^[a-zA-Z][a-zA-Z0-9_]*)\.(?P<key>[a-zA-Z][a-zA-Z0-9_]*)' + if matched := re.match(ptrn, str_obj): + attr = getattr(self.target, matched['attr']) + return attr.get(matched['key'], None) + else: + return str_obj + + def _process_list(self, list_obj: List): + for c in list_obj: + processed = self._eval_value(c) + if processed is not None: + return processed + return None + + def _process_dict(self, dict_obj: Dict): + script_cmd = 'Equation' if self.backward_comp else 'script' + if script_cmd in dict_obj.keys(): + return self._process_dict_case_script(dict_obj, script_cmd) + elif 'key' in dict_obj.keys(): + return self._process_dict_case_pick_from_list(dict_obj) + else: + processed = {} + for key, value in dict_obj.items(): + processed[key] = self._eval_value(value) + return processed + + def _process_dict_case_script(self, dict_obj: Dict, script_cmd: str): + script = dict_obj.pop(script_cmd) + for s in self.startup_scripts or []: + exec(s) + for key, value in dict_obj.items(): + value = self._eval_value(value) + if value is None: + return None + exec(f'global {key}') + exec(f'{key} = {value}') + exec(f"output = {script}", globals(), locals()) + return locals()['output'] + + def _process_dict_case_pick_from_list(self, dict_obj: Dict): + key = dict_obj.pop('key') + value = self._process_str(key) + if not isinstance(value, list): + warnings.warn(f"The value returned from '{key}' is not of type 'list'.", UserWarning) + return None + if 'where' in dict_obj.keys(): + hint = self._eval_value(dict_obj.pop('where')) + return value.index(hint) if hint in value else None + elif 'idx' in dict_obj.keys(): + idx = self._eval_value(dict_obj.pop('idx')) + return value[idx] if idx < len(value) else None + + def get(self): + return self.results \ No newline at end of file From 141ecc96678df274d384b98f5bb00d07f04c5799 Mon Sep 17 
00:00:00 2001 From: dvm-shlee Date: Mon, 29 Apr 2024 17:33:35 -0400 Subject: [PATCH 11/14] [hotfix] recon module fix following app.to_nifti refactoring --- brkraw/lib/recon.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/brkraw/lib/recon.py b/brkraw/lib/recon.py index 9efec11..383b6ef 100644 --- a/brkraw/lib/recon.py +++ b/brkraw/lib/recon.py @@ -17,7 +17,7 @@ """ from .recoFunctions import phase_rotate, phase_corr, zero_filling -from ..api.brkobj.scan import ScanObj +from ..api.data import Scan import numpy as np import warnings @@ -26,7 +26,8 @@ def reconstruction(scanobj,process='image', **kwargs): # Ensure Scans are Image Based - ACQ_dim_desc = [scanobj.acqp.get('ACQ_dim_desc')] if isinstance(scanobj.acqp.get('ACQ_dim_desc'), str) else scanobj.acqp.get('ACQ_dim_desc') + acqp = scanobj.pvobj.acqp + ACQ_dim_desc = [acqp.get('ACQ_dim_desc')] if isinstance(acqp.get('ACQ_dim_desc'), str) else acqp.get('ACQ_dim_desc') if 'Spectroscopic' in ACQ_dim_desc: warnings.warn('Scan is spectroscopic') process = 'readout' @@ -40,10 +41,11 @@ def reconstruction(scanobj,process='image', **kwargs): return recoObj.reconstruct(rms=kwargs['rms'] if 'rms' in kwargs.keys() else True) class Reconstruction: - def __init__(self, scanobj:'ScanObj', reco_id:'int'=1) -> None: - self.acqp = scanobj.acqp - self.method = scanobj.method - self.fid = scanobj.get_fid() + def __init__(self, scanobj:'Scan', reco_id:'int'=1) -> None: + pvscan = scanobj.pvobj + self.acqp = pvscan.acqp + self.method = pvscan.method + self.fid = pvscan.get_fid() self.CS = True if self.method.get('PVM_EncCS')=='Yes' else False self.NI = self.acqp['NI'] self.NR = self.acqp['NR'] @@ -51,7 +53,7 @@ def __init__(self, scanobj:'ScanObj', reco_id:'int'=1) -> None: self.reco_id = reco_id self.info = scanobj.get_info(self.reco_id) self.protocol = self.info.protocol - self.reco = scanobj.get_reco(self.reco_id).reco + self.reco = pvscan.get_reco(self.reco_id).reco self.supported_protocol = any([True for i in SUPPORTED_PROTOCOLS if i in self.protocol['protocol_name'].lower()]) # 1) Convert Buffer to a np array From 6f270a817919842df207d7ec0d29e58bbfdef1fc Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Mon, 29 Apr 2024 17:34:11 -0400 Subject: [PATCH 12/14] [plugin] sordino2nifti plugin --- plugin/sordino/config.yaml | 50 +++++ plugin/sordino/sordino.py | 384 ++++++++++++++++++++++++++++++++++ plugin/sordino/sordino2nii.py | 380 +++++++++++++++++++++++++++++++++ 3 files changed, 814 insertions(+) create mode 100644 plugin/sordino/config.yaml create mode 100644 plugin/sordino/sordino.py create mode 100644 plugin/sordino/sordino2nii.py diff --git a/plugin/sordino/config.yaml b/plugin/sordino/config.yaml new file mode 100644 index 0000000..18cf2eb --- /dev/null +++ b/plugin/sordino/config.yaml @@ -0,0 +1,50 @@ +name: sordino + +version: 24.4.27 + +authors: +- Sung-Ho Lee (shlee@unc.edu) + +license: MIT +description: "This plugin converts SORDINO-MRI (acquired in radial trajectory) to NIfTI format." 
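+# Note: 'app.src' below points brkraw's tonifti app at the implementing class, referenced as <file>::<class>.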
+documentation_url: "https://github.com/sordino" + +app: + target: tonifti + src: sordino.py::Sordino + +dependencies: +- numpy: ">=1.18" +- scipy: ">=1.11.1" +- sigpy: ">=0.1.26" + +script: + name: "sordino" + help: "plugin function for SORDINO-MRI reconstruction" + arguments: + - name: "-e" + long_name: "--extention" + help: "Extension factors for FOV regridding (LR AP SI)" + nargs: "+" + type: "list, int" + default: [1,1,1] + - name: "--offset" + help: "Index of offset frame (start point of reconstruction)" + type: "int" + default: null + - name: "--num-frames" + help: "Specify number of frames from offset." + type: "int" + default: null + - name: "--tmpdir" + help: "Directory to store temporary files." + type: "str" + default: null + - name: "--spoketiming" + help: "apply spoke timing correction." + action: "store_true" + default: false + - name: "--mem-limit" + help: "set limit of memory size when loading data (in GB)." + type: "float" + default: 0.5 diff --git a/plugin/sordino/sordino.py b/plugin/sordino/sordino.py new file mode 100644 index 0000000..83358d8 --- /dev/null +++ b/plugin/sordino/sordino.py @@ -0,0 +1,384 @@ +from __future__ import annotations +import os +import sys +import warnings +import tempfile +import numpy as np +import sigpy as sp +from pathlib import Path +from tqdm import tqdm +from scipy.interpolate import interp1d +from brkraw.app.tonifti import BasePlugin +from brkraw.app.tonifti.base import ScaleMode +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from nibabel.nifti1 import Nifti1Header + from typing import List, Union, Optional, Tuple + from brkraw.app.tonifti import PvScan, PvFiles + from io import BufferedReader, BufferedWriter + from zipfile import ZipExtFile + +class Sordino(BasePlugin): + """ SORDINO: Plugin to Convert Bruker's SORDINO Images to NIfTI File for ToNifti App in Brkraw + ParaVision Compatibility: > 360.x + """ + _recon_dtype: Optional[np.dtype] = None + + def __init__(self, pvobj: Union['PvScan', 'PvFiles'], + ext_factors: Optional[List[float]]=None, + offset: Optional[int]=None, + num_frames: Optional[int]=None, + spoketiming: Optional[bool]=False, + mem_limit: Optional[float]=None, + tmpdir: Optional[Path]=None, + nufft: Optional[str]='sigpy', + scale_mode: Optional[ScaleMode]=None, + **kwargs + ) -> None: + super().__init__(pvobj, **kwargs) + self._inspect() + self._set_params() + self._set_cache(tmpdir) + self.ext_factors: List[float] = ext_factors or [1,1,1] + self.offset: int = offset or 0 + self.spoketiming: bool = spoketiming + self.mem_limit: Optional[float] = mem_limit + self.num_frames: Optional[int] = num_frames or self._num_frames + self.nufft: str = nufft + self.scale_mode: ScaleMode = scale_mode or ScaleMode.HEADER + self.slope, self.inter = 1, 0 + + def get_dataobj(self) -> np.ndarray: + self._set_trajectory() + if self.spoketiming and self.num_frames > 1: + bufferobj, buffer_size = self._fid_correct_spoketiming() + dataobj = self._recon_fid(bufferobj, buffer_size) + else: + dataobj = self._recon_fid() + dataobj = np.abs(dataobj) # magnitude image only + dataobj = self._dataobj_correct_orientation(dataobj) + if self.scale_mode == ScaleMode.HEADER: + self._calc_slope_inter(dataobj) + dataobj = self._dataobj_rescale_to_uint16(dataobj) + return dataobj + + def get_affine(self, reco_id:Optional[int]=None, + subj_type:Optional[str]=None, + subj_position:Optional[str]=None) -> np.ndarray: + return super().get_affine(self, reco_id=reco_id, + subj_type=subj_type, + subj_position=subj_position) + + def 
get_nifti1header(self) -> Nifti1Header: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + header = super().get_nifti1header(self) + if self.scale_mode == ScaleMode.HEADER: + header.set_slope_inter(self.slope, self.inter) # slope/inter are computed in get_dataobj via _calc_slope_inter + return header + + def _calc_slope_inter(self, dataobj: np.ndarray): + dmax = np.max(dataobj) + self.inter = np.min(dataobj) + self.slope = (dmax - self.inter) / 2**16 + + def _dataobj_rescale_to_uint16(self, dataobj: np.ndarray) -> np.ndarray: + if self.verbose: + print(f" + convert dtype to UINT16") + print(f" - Slope: {self.slope:.3f}") + print(f" - Intercept: {self.inter:.3f}") + if dataobj.ndim > 3: + converted_dataobj = [] + for t in tqdm(range(dataobj.shape[-1]), desc=' - Frame', file=sys.stdout, ncols=80): + converted_dataobj.append(((dataobj[..., t] - self.inter) / self.slope).round().astype(np.uint16)[..., np.newaxis]) + converted_dataobj = np.concatenate(converted_dataobj, axis=-1) + else: + converted_dataobj = ((dataobj - self.inter) / self.slope).round().astype(np.uint16) + return converted_dataobj + + def _set_cache(self, tmpdir: Optional[Path]) -> None: + self.tmpdir: Path = tmpdir or Path.home() / '.tmp' + self.tmpdir.mkdir(exist_ok=True) + + def _inspect(self) -> None: + """ + Inspect the provided pvobj to ensure it is compatible with this plugin. + + Args: + pvobj (object): The object to be validated for compatibility. + + Raises: + NotImplementedError: If the pvobj's type or attributes do not meet the plugin's requirements. + + This method checks for the necessary attributes in the pvobj required for SORDINO reconstruction. + It also validates that the pvobj is not an instance of 'PvReco', as support for this type is not implemented. + """ + if self.verbose: + print("++ Inspecting compatibility of imported pvobj for SORDINO reconstruction...") + if self.pvobj.isinstance('PvReco'): + raise NotImplementedError("Support for 'PvReco' is not implemented.") + required_fields = ['method', 'acqp', 'traj', 'rawdata.job0'] + missing_fields = [field for field in required_fields if not hasattr(self.pvobj, field)] + if missing_fields: + error_message = f"Input pvobj is missing required fields: {', '.join(missing_fields)}" + raise NotImplementedError(error_message) + if not hasattr(self.info, 'orientation'): + warnings.warn("Input object is missing the 'orientation' attribute. 
This is not critical, but the orientation of the reconstructed image might be incorrect.", UserWarning) + + def _get_fid(self) -> Union[BufferedReader, ZipExtFile]: + return self.pvobj.get_fid() + + def _get_traj(self) -> Union[BufferedReader, ZipExtFile]: + return self.pvobj['traj'] + + def _set_params(self) -> None: + # acqp params + if self.verbose: + print("\n++ Fetch parameters from PvObj...") + acqp = self.pvobj.acqp + nr = acqp['NR'] + ni = acqp['NI'] + nae = acqp['NAE'] + ns = acqp['NSLICES'] + acq_jobs = acqp['ACQ_jobs'][0] + + self.repetition_time = acqp['ACQ_repetition_time'] / 1000 + self.fid_shape = np.array([2, acq_jobs[0]/2, acq_jobs[3]/(nr*nae), acq_jobs[-2], ns]).astype(int).tolist() + self._num_frames = int(nr * ni / ns) + self.dtype_code = self.info.fid['dtype'] + self.buffer_size = np.prod(self.fid_shape) * self.dtype_code.itemsize + + # method + method = self.pvobj.method + npro = method['NPro'] + mat_size = method['PVM_Matrix'] + over_samp = method['OverSampling'] + + self.mat_size = np.array(mat_size) + self.num_pts = int((mat_size[0] * over_samp) / 2) + self.smp_idx = np.arange(0, int(mat_size[0] / 2)) + self.smp_idx_oversmp = np.array([c + s for s in self.smp_idx \ + for c in np.linspace(0, 1 - 1 / over_samp, over_samp)]) + self.traj_shape = np.array([3, mat_size[0]/2, npro]).astype(int) + + def _set_trajectory(self) -> None: + with self._get_traj() as f: + self.traj = np.frombuffer(f.read(), np.double).reshape(self.traj_shape, order='F') + self._traj_apply_oversampling() + self._traj_correct_readout_direction() + self._traj_correct_projection_order() + if any(np.array(self.ext_factors) != 1): + if self.verbose: + ef = map('*'.join, list(zip(['x', 'y', 'z'], map(str, self.ext_factors)))) + print(f" + Extension factors: {', '.join(list(ef))}") + self._traj_apply_extension_factors() + + def _traj_apply_oversampling(self) -> None: + traj = np.zeros((3, self.num_pts, self.fid_shape[2])) + for i, coord in enumerate(self.traj): + step = np.mean(np.diff(coord, 1, axis=0), axis=0) + coord = np.insert(coord, -1, coord[-1, :], axis=0) + coord[-1, :] = coord[-2, :] + step + func = interp1d(np.append(self.smp_idx, self.smp_idx[-1]+1), coord, axis=0) + # Evaluating trajectory at finer intervals + traj[i, :, :] = func(self.smp_idx_oversmp) + self.traj = traj + + def _traj_correct_readout_direction(self) -> None: + self.traj[:, :, ::2] = -self.traj[:, :, 1::2] + self.traj = np.multiply(self.traj, self.mat_size[:, np.newaxis, np.newaxis] + .repeat(self.traj.shape[1], 1) + .repeat(self.traj.shape[2], 2)) + + def _traj_correct_projection_order(self) -> None: + proj_order = np.concatenate([np.arange(0, self.fid_shape[2], 2), + np.arange(1, self.fid_shape[2], 2)]) + self.traj = self.traj[:, :, proj_order] + + def _traj_apply_extension_factors(self): + for i, ef in enumerate(self.ext_factors): + self.traj[i] *= ef + + def _dataobj_correct_orientation(self, dataobj) -> np.ndarray: + """Correct the subject orientation to match that of the online reconstructed image. + Note: This is experimental and has only been tested on a small number of cases. 
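+ (It applies ACQ_GradientMatrix first, then the VisuCore orientation when available.)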
+ Subject to updates and revisions.""" + # logical to physical orientation correction + grad_mat = self.pvobj.acqp['ACQ_GradientMatrix'][0] + corrected_dataobj = self._rotate_dataobj(dataobj, grad_mat) + if hasattr(self.info, 'orientation'): + visu_ori = self.info.orientation['orientation'] + corrected_dataobj = self._rotate_dataobj(corrected_dataobj, visu_ori) + return corrected_dataobj + + @staticmethod + def _rotate_dataobj(dataobj: np.ndarray, rotation_matrix) -> np.ndarray: + axis_order = np.nonzero(rotation_matrix.T)[1].tolist() + if len(dataobj.shape) > 3: + axis_order += [3] + corrected_dataobj = np.transpose(dataobj, axis_order) + x, y, z = rotation_matrix.sum(0) + return corrected_dataobj[::x, ::y, ::z, ...] + + def _recon_fid(self, filepath=None, buffer_size=None) -> np.ndarray: + buffer_size = buffer_size or self.buffer_size + output_shape = np.round(self.mat_size * self.ext_factors, decimals=0).tolist() + buffer_offset = 0 if filepath else self.offset * buffer_size + if self.verbose: + print("\n++ Reconstruction (FID -> Image[complex])") + print(f" + Output shape: {'x'.join(map(str, output_shape))}") + ndim = 4 if self.num_frames > 1 else 3 + print(f" + Output dim: {ndim}") + if self.offset: + print(f" + Frame offset: {self.offset}") + if ndim == 4: + print(f" + Output num of frames: {self.num_frames}") + with tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir=self.tmpdir) as f_recon: + if filepath: + with open(filepath, 'rb') as f_fid: + self._recon_process(f_fid, f_recon, buffer_offset, output_shape) + else: + with self._get_fid() as f_fid: + self._recon_process(f_fid, f_recon, buffer_offset, output_shape) + if self.verbose: + print(f" + Converting dtype (complex -> float32)...", end='') + shape = output_shape + [self.num_frames] if self.num_frames > 1 else output_shape + f_recon.seek(0) + recon = np.frombuffer(f_recon.read(), dtype=self._recon_dtype).reshape(shape, order='F') + if self.verbose: + print('Success') + self._buffers.append(f_recon) + return recon + + def _recon_process(self, + fid_buffer: Union['BufferedReader', 'ZipExtFile'], + recon_buffer: 'BufferedWriter', + buffer_offset: int, + output_shape: list) -> None: + fid_buffer.seek(buffer_offset) + if self.verbose: + framerange = tqdm(range(self.num_frames), desc=' -Frames', file=sys.stdout, ncols=80) + else: + framerange = range(self.num_frames) + for n in framerange: + buffer = fid_buffer.read(self.buffer_size) + fid = np.frombuffer(buffer, dtype=self.dtype_code).reshape(self.fid_shape, order='F') + fid = (fid[0] + 1j*fid[1])[np.newaxis, ...] 
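+ # 'fid' now holds complex k-space spokes (real row + 1j * imaginary row);
+ # the adjoint NUFFT below grids them onto the Cartesian output matrix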
+ volume = self._apply_nufft(fid, output_shape) + if n == 0: + self._recon_dtype = volume.dtype + recon_buffer.write(volume.T.flatten(order='C').tobytes()) + + def _apply_nufft(self, fid: np.ndarray, output_shape: list) -> np.ndarray: + if self.nufft == 'sigpy': + return self._sigpy_nufft(fid, output_shape) + else: + raise NotImplementedError + + def _sigpy_nufft(self, fid: np.ndarray, output_shape: list) -> np.ndarray: + dcf = np.square(self.traj).sum(0).T + return sp.nufft_adjoint(fid.squeeze().T * dcf, self.traj.T, oshape=output_shape) + + def _fid_correct_spoketiming(self) -> Tuple[str, int]: + # parameters for spoke timing correction + num_spokes = self.fid_shape[2] + + if self.verbose: + print("\n++ Running spoke timing correction") + + with tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir=self.tmpdir) as stc_f: + with self._get_fid() as fid_f: + file_size = self._fid_get_filesize(fid_f) + segs = self._fid_split_by_filesize(file_size, num_spokes) + stc_buffer_size = self._fid_process_spoketiming(segs, fid_f, stc_f, num_spokes) + self._buffers.append(stc_f) + return stc_f.name, stc_buffer_size + + def _fid_get_filesize(self, fid_buffer: Union['BufferedReader', 'ZipExtFile']) -> float: + if self.pvobj.is_compressed: + fid_fname = os.path.basename(fid_buffer.name) + fid_idx = [i for i, f in enumerate(self.pvobj._contents['files']) if fid_fname in f].pop() + file_size = self.pvobj._contents['file_sizes'][fid_idx] + else: + file_size = os.fstat(fid_buffer.fileno()).st_size + return ((file_size / self._num_frames) * self.num_frames) / 1024**3 # in GB + + def _fid_split_by_filesize(self, file_size: float, num_spokes: int) -> List[int]: + num_segs = np.ceil(file_size / self.mem_limit).astype(int) if self.mem_limit else 1 + if self.verbose: + print(f' + Size: {file_size:.3f} GB') + print(f' + Split data into {num_segs} segments for saving memory.') + + num_spokes_per_seg = int(np.ceil(num_spokes / num_segs)) if num_segs > 1 else num_spokes + if residual_spokes := num_spokes % num_spokes_per_seg: + segs = [num_spokes_per_seg for _ in range(num_segs -1)] + [residual_spokes] + else: + segs = [num_spokes_per_seg for _ in range(num_segs)] + return segs + + def _fid_process_spoketiming(self, segs: list[int], + fid_buffer: Union['BufferedReader', 'ZipExtFile'], + img_buffer: 'BufferedWriter', + num_spokes: int) -> int: + num_echos = self.fid_shape[1] + recon_buffer_offset = self.offset * self.buffer_size + + spoke_loc = 0 + stc_dtype = None + stc_buffer_size = None + + if self.verbose: + segrange = tqdm(segs, desc=' - Segments', file=sys.stdout, ncols=80) + else: + segrange = segs + for seg_size in segrange: + # load data + spoke_buffer_size = int(self.buffer_size/num_spokes) + spoke_offset = spoke_loc * spoke_buffer_size + # total buffer size for current segment + seg_buffer_size = spoke_buffer_size * seg_size + seg = [] + for t in range(self.num_frames): + frame_offset = t * self.buffer_size + fid_buffer.seek(recon_buffer_offset + frame_offset + spoke_offset) + seg.append(fid_buffer.read(seg_buffer_size)) + seg_shape = [2, num_echos, seg_size, self.num_frames] + seg_data = np.frombuffer(b''.join(seg), dtype=self.dtype_code).reshape(seg_shape, order='F') + # Spoke timing correction + corrected_seg_data = self._fid_interpolate_segment(seg_data, seg_size, num_echos, + num_spokes, spoke_loc) + + # Store data + for t in range(self.num_frames): + frame_offset = t * self.buffer_size + img_buffer.seek(frame_offset + spoke_offset) + img_buffer.write(corrected_seg_data[:,:,:, 
t].flatten(order='F').tobytes()) + + if not stc_dtype: + stc_dtype = corrected_seg_data.dtype + stc_buffer_size = np.prod(self.fid_shape) * stc_dtype.itemsize + spoke_loc += seg_size + return stc_buffer_size + + def _fid_interpolate_segment(self, + seg_data: np.ndarray, + seg_size: int, + num_echos: int, + num_spokes: int, + spoke_loc: int) -> np.ndarray: + scan_time_per_vol = num_spokes * self.repetition_time + target_timing = scan_time_per_vol / 2 + base_timestamps = np.arange(self.num_frames) * scan_time_per_vol + target_timestamps = base_timestamps + target_timing + corrected_seg_data = np.empty_like(seg_data) + for spoke_id in range(seg_size): + cur_spoke = spoke_loc + spoke_id + ref_timestamps = base_timestamps + (cur_spoke * self.repetition_time) + for c in range(2): # real and imaginary (complex) + for e in range(num_echos): + interp_func = interp1d(ref_timestamps, + seg_data[c, e, spoke_id, :], + kind='linear', + fill_value='extrapolate') + corrected_seg_data[c, e, spoke_id, :] = interp_func(target_timestamps) + return corrected_seg_data diff --git a/plugin/sordino/sordino2nii.py b/plugin/sordino/sordino2nii.py new file mode 100644 index 0000000..da50ac3 --- /dev/null +++ b/plugin/sordino/sordino2nii.py @@ -0,0 +1,380 @@ +#!/usr/bin/env python3 + +import argparse +import os +import sys +import tempfile +import brkraw as brk +from typing import Tuple, List, Optional, Any +import sigpy as sp +import nibabel as nib +import numpy as np +from numpy.fft import fftn, ifftn, fftshift, ifftshift +from scipy.signal import get_window +from scipy.interpolate import interp1d +from tqdm import tqdm + +# Dependencies brkraw, sigpy, nibabel, numpy, scipy, and tqdm +# tested on Python 3.8, 3.10 +# Author: SungHo Lee(shlee@unc.edu) + +## Brk-bart +def parse_acqp(rawobj, scan_id): + # acqp parameter parsing + acqp = rawobj.get_acqp(scan_id).parameters + nr = acqp['NR'] + ni = acqp['NI'] + nae = acqp['NAE'] # number of average + ns = acqp['NSLICES'] + acq_jobs = acqp['ACQ_jobs'][0] + sz = [acq_jobs[0]/2, acq_jobs[3]/(nr*nae), acq_jobs[-2]] + + wordtype = brk.lib.reference.WORDTYPE[f'_{"".join(acqp["ACQ_word_size"].split("_"))}_SGN_INT'] + byteorder = brk.lib.reference.BYTEORDER[f'{acqp["BYTORDA"]}Endian'] + + tr = acqp['ACQ_repetition_time'] / 1000 + dtype_code = np.dtype(f'{byteorder}{wordtype}') + fid_shape = np.array([2] + sz + [ns]).astype(int).tolist() + num_frames = int(nr*ni/ns) + buffer_size = np.prod(fid_shape)*dtype_code.itemsize + + return dict(fid_shape=fid_shape, + dtype_code=dtype_code, + num_frames=num_frames, + buffer_size=buffer_size, + repetition_time=tr) + + +def get_traj(rawobj, scan_id): + method = rawobj.get_method(scan_id).parameters + traj_shape = np.array( + [3, method['PVM_Matrix'][0]/2, method['NPro']]).astype(int) + return np.frombuffer(rawobj._pvobj.get_traj(scan_id), np.double).reshape(traj_shape, order='F') + + +def calc_trajectory(rawobj, scan_id, params, ext_factors): + method = rawobj.get_method(scan_id).parameters + over_samp = method['OverSampling'] + mat_size = np.array(method['PVM_Matrix']) + + num_pts = int((mat_size[0] * over_samp) / 2) + smp_idx = np.arange(0, int(mat_size[0] / 2)) + smp_idx_oversmp = np.array( + [c+s for s in smp_idx for c in np.linspace(0, 1-1/over_samp, over_samp)]) + + # trajectory + traj = get_traj(rawobj, scan_id) + + # calculate trajectory oversampling + # axis2 = number of projection + traj_oversmp = np.zeros((3, num_pts, params['fid_shape'][2])) + for i, coord in enumerate(traj): + + step = np.mean(np.diff(coord, 1, axis=0), axis=0) + 
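+    # pad each trajectory axis with one extrapolated point (mean step) so that
+    # interp1d can evaluate the oversampled indices up to smp_idx[-1]+1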
+ coord = np.insert(coord, -1, coord[-1, :], axis=0) + coord[-1, :] = coord[-2, :] + step + func = interp1d(np.append(smp_idx, smp_idx[-1]+1), coord, axis=0) + + # Evaluating trajectory at finer intervals + traj_oversmp[i, :, :] = func(smp_idx_oversmp) + + traj_oversmp[:, :, ::2] = -traj_oversmp[:, :, 1::2] # correct direction + + traj_oversmp = np.multiply(traj_oversmp, mat_size[:, np.newaxis, np.newaxis] + .repeat(traj_oversmp.shape[1], 1) + .repeat(traj_oversmp.shape[2], 2)) + + proj_order = np.concatenate([np.arange(0, params['fid_shape'][2], 2), + np.arange(1, params['fid_shape'][2], 2)]) + + # apply extend FOV factor + traj_adjusted = traj_oversmp[:, :, proj_order] + traj_adjusted[0] *= ext_factors[0] + traj_adjusted[1] *= ext_factors[1] + traj_adjusted[2] *= ext_factors[2] + return traj_adjusted + + +def validate_int_or_list(value): + try: + return int(value) + except ValueError: + raise argparse.ArgumentTypeError("Extension factors must be integer values.") + +__version__ = '24.1.01' + +def main(): + parser = argparse.ArgumentParser(prog="sordino2nii", description='Reconstruction tool for SORDINO fMRI sequence') + + parser.add_argument("-i", "--input", help="input path of raw data", type=str, default=None) + parser.add_argument("-o", "--prefix", help='output prefix of reconstructed image', type=str, default='output') + parser.add_argument("-s", "--scanid", help="scan id", type=int, default=None) + parser.add_argument("-e", "--extention", help="Extension factors for FOV regridding (LR AP SI)", nargs="+", + type=validate_int_or_list, default=[1,1,1]) + parser.add_argument("--offset", help="Index of offset frame (start point of reconstruction)", type=int, default=0) + parser.add_argument("--num-frames", help='Number of frames to be reconstructed (from offset)', type=int, default=None) + parser.add_argument("--tmpdir", help="folder to store temporary file", type=str, default=None) + parser.add_argument("--spoketiming", help="apply spoke timing correction", action='store_true') + parser.add_argument("--clear-cache", help='delete intermediate binary files generated during reconstruction', action='store_true') + parser.add_argument("--mem-limit", help='set limit of memory size when loading data (in GB)', type=float, default=0.5) + + print(f"++ sordino2nii(v{__version__}): Reconstruction tool for SORDINO fMRI sequence") + print("++ Authored by: SungHo Lee (email: shlee@unc.edu)") + + args = parser.parse_args() + tqdm_ncols = 100 + # fetch variables + mem_limit = args.mem_limit + scan_id = args.scanid + offset = args.offset + ext_factor = args.extention * 3 if len(args.extention) == 1 else args.extention + # gamma = args.gamma[0] if len(args.gamma) == 1 else args.gamma + prefix = args.prefix + + # create temporary folder + tmpdir = args.tmpdir or os.path.join(os.curdir, '.tmp') + os.makedirs(tmpdir, exist_ok=True) + + # loading data + try: + print(f"\n++ Loading input Bruker rawdata {args.input}") + raw = brk.load(args.input) + params = parse_acqp(raw, scan_id) + pvobj = raw._pvobj + num_frames = args.num_frames or params['num_frames'] + except: + parser.print_help() + sys.exit() + + # parameters for reconstruction + print("\n++ Fetch parameters from rawdata") + num_echos, num_spokes = params['fid_shape'][1:3] + raw_buffer_size = params['buffer_size'] + raw_dtype = params['dtype_code'] + recon_buffer_offset = offset * raw_buffer_size + print(f" + Extension factors: x*{ext_factor[0]}, y*{ext_factor[1]}, z*{ext_factor[2]}") + ext_factor = [ext_factor[1], ext_factor[0], ext_factor[2]] + ## calculate 
trajectory + visu_pars = raw.get_visu_pars(scan_id, 1).parameters + traj = calc_trajectory(raw, scan_id, params, ext_factor) + coord = traj.T + dcf = coord[..., 0] ** 2 + coord[..., 1] ** 2 + coord[..., 2] ** 2 + oshape = (np.array(visu_pars['VisuCoreSize']) * ext_factor).tolist() + print(f" + Output shape: {oshape[1]}x{oshape[0]}x{oshape[2]}") + ndim = 4 if num_frames > 1 else 3 + print(f" + Output dim: {ndim}") + if offset: + print(f" + Frame offset: {offset}") + if ndim == 4: + print(f" + Output num of frames: {num_frames}") + + # Image reconstruction + _caches = [] + with tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir=tmpdir) as recon_f: + recon_dtype = None + tr = params['repetition_time'] + scan_time_per_vol = num_spokes * tr + # Spoke timing correction + if args.spoketiming: + print("\n++ Running spoke timing correction for --spoketiming") + ## Run spoke timing correction + # parameters for spoke timing correction + + target_timing = scan_time_per_vol / 2 + base_timestamps = np.arange(num_frames) * scan_time_per_vol + target_timestamps = base_timestamps + target_timing + spoke_buffer_size = int(raw_buffer_size/num_spokes) + + with tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir=tmpdir) as stc_f: + with pvobj._open_object(pvobj._fid[scan_id]) as fid_f: + # print the size of file in GB + try: + file_size = pvobj.filelist[pvobj._fid[scan_id]].file_size / params['num_frames'] * num_frames / 1024**3 + except: + file_size = os.path.getsize(pvobj._fid[scan_id]) / params['num_frames'] * num_frames / 1024**3 + print(f' + Size: {file_size:.3f} GB') + + # for safety reason, cut data into the size defined at limit_mem_size (in GB) + num_segs = np.ceil(file_size / mem_limit).astype(int) + print(f' + Split data into {num_segs} segments for saving memory.') + + num_spokes_per_seg = int(np.ceil(num_spokes / num_segs)) + if residual_spokes := num_spokes % num_spokes_per_seg: + segs = [num_spokes_per_seg for _ in range(num_segs -1)] + [residual_spokes] + else: + segs = [num_spokes_per_seg for _ in range(num_segs)] + + spoke_loc = 0 + stc_dtype = None + stc_buffer_size = None + for seg_size in tqdm(segs, desc=' - Segments', file=sys.stdout, ncols=tqdm_ncols): + # load data + spoke_offset = spoke_loc * spoke_buffer_size + seg_buffer_size = spoke_buffer_size * seg_size # total buffer size for current segment + seg = [] + for t in range(num_frames): + frame_offset = t * raw_buffer_size + fid_f.seek(recon_buffer_offset + frame_offset + spoke_offset) + seg.append(fid_f.read(seg_buffer_size)) + seg_data = np.frombuffer(b''.join(seg), dtype=raw_dtype).reshape([2, num_echos, + seg_size, num_frames], + order='F') + # Spoke timing correction + corrected_seg_data = np.empty_like(seg_data) + for spoke_id in range(seg_size): + cur_spoke = spoke_loc + spoke_id + ref_timestamps = base_timestamps + (cur_spoke * tr) + for c in range(2): # real and imaginary (complex) + for e in range(num_echos): + interp_func = interp1d(ref_timestamps, + seg_data[c, e, spoke_id, :], + kind='linear', + fill_value='extrapolate') + corrected_seg_data[c, e, spoke_id, :] = interp_func(target_timestamps) + + + # Store data + for t in range(num_frames): + frame_offset = t * raw_buffer_size + stc_f.seek(frame_offset + spoke_offset) + stc_f.write(corrected_seg_data[:,:,:, t].flatten(order='F').tobytes()) + + if not stc_dtype: + stc_dtype = corrected_seg_data.dtype + stc_buffer_size = np.prod(params['fid_shape']) * stc_dtype.itemsize + + spoke_loc += seg_size + print(' + Success') + # clear memory (we only need stc_f, 
stc_dtype, stc_buffer_size) + ## spoke timing prep related namespaces + del tr, target_timing, + del base_timestamps, target_timestamps, spoke_buffer_size + ## segmenting related namespaces + del file_size, mem_limit, num_segs, + del num_spokes_per_seg, residual_spokes, segs, spoke_loc, + del seg_buffer_size, seg_size, seg_data, num_echos + ## spoke timing correction related + del cur_spoke, spoke_id, ref_timestamps, interp_func + del frame_offset, corrected_seg_data + del fid_f + + print("\n++ Reconstruction (FID -> Image[complex])") + with open(stc_f.name, 'r+b') as fid_f: + # Reconstruction + fid_f.seek(0) + recon_f.seek(0) + for n in tqdm(range(num_frames), desc=' - Frames', file=sys.stdout, ncols=tqdm_ncols): + buffer = fid_f.read(stc_buffer_size) + v = np.frombuffer(buffer, dtype=stc_dtype).reshape(params['fid_shape'], order='F') + v = (v[0] + 1j*v[1])[np.newaxis, ...] + ksp = v.squeeze().T + recon_vol = sp.nufft_adjoint(ksp * dcf, coord, oshape=oshape) + if n == 0: + recon_dtype = recon_vol.dtype + recon_f.write(recon_vol.T.flatten(order='C').tobytes()) + _caches.append(stc_f.name) + print(" + Success") + + # Without spoke timing correction + else: + print("\n++ Reconstruction (FID -> Image[complex])") + with pvobj._open_object(pvobj._fid[scan_id]) as fid_f: + fid_f.seek(recon_buffer_offset) + recon_f.seek(0) + for n in tqdm(range(num_frames), desc=' - Frames', file=sys.stdout, ncols=tqdm_ncols): + buffer = fid_f.read(raw_buffer_size) + v = np.frombuffer(buffer, dtype=raw_dtype).reshape(params['fid_shape'], order='F') + v = (v[0] + 1j*v[1])[np.newaxis, ...] + ksp = v.squeeze().T + recon_vol = sp.nufft_adjoint(ksp * dcf, coord, oshape=oshape) + if n == 0: + recon_dtype = recon_vol.dtype + recon_f.write(recon_vol.T.flatten(order='C').tobytes()) + print(" + Success") + + def calc_slope_inter(data): + inter = np.min(data) + dmax = np.max(data) + slope = (dmax - inter) / 2**16 + + if data.ndim > 3: + converted_data = [] + for t in tqdm(range(data.shape[-1]), desc=' - Frame', file=sys.stdout, ncols=tqdm_ncols): + converted_data.append(((data[..., t] - inter) / slope).round().astype(np.uint16)[..., np.newaxis]) + converted_data = np.concatenate(converted_data, axis=-1) + else: + converted_data = ((data - inter) / slope).round().astype(np.uint16) + return converted_data, slope, inter + + # Save to NIFTI file + with open(recon_f.name, 'r+b') as img_f: + print(f" + Converting dtype (complex -> float32)...", end='') + img = np.abs(np.frombuffer(img_f.read(), dtype=recon_dtype).reshape(oshape + [num_frames], order='F')) + print('success') + + + print(f"\n++ Creating Nifti image") + output_fpath = f'{prefix}.nii.gz' + print(f" + convert dtype to UINT16") + img, slope, inter = calc_slope_inter(img) + img = img.squeeze() + print(f" - Slope: {slope:.3f}") + print(f" - Intercept: {inter:.3f}") + print(f" - Min: {img.min()}") + print(f" - Max: {img.max()}") + method = raw.get_method(scan_id).parameters + affine = np.array(method['PVM_SpatResol'] + [1]) + position = -1 * np.array(method['PVM_Fov']) * ext_factor / 2 + grad_matrix = np.round(np.array(method['PVM_SPackArrGradOrient'][0]).T, decimals=0).astype(np.int16) + axis_order = np.arange(img.ndim) + axis_order[:3] = tuple([int(np.squeeze(np.nonzero(ax))) for ax in grad_matrix]) + flip_axis = np.nonzero(grad_matrix.sum(0) < 0)[0].tolist() + + affine[flip_axis] *= -1 + affine = np.diag(affine) + affine[:3, 3] = position.dot(np.abs(grad_matrix.T)) + + img = img.transpose(axis_order) + img = np.flip(img, flip_axis) + + # position correction + 
print(f"\n + calculating affine matrix") + axis_order[:3] = [0, 2, 1] + img = img.transpose(axis_order) + # img = np.flip(img, 2) + img = np.flip(img, 1) + affine[:, [1, 2]] = affine[:, [2, 1]] + affine[[1, 2], :] = affine[[2, 1], :] + affine[:, 0] *= -1 + affine[2, 3] *= -1 + # affine[:, 2] *= -1 + print(" - roration:") + for i in range(3): + print(f" {affine[i, 0]}, {affine[i, 1]}, {affine[i, 2]}") + print(f" - position(LPS): {affine[0,3]:.3f}, {affine[1,3]:.3f}, {affine[2,3]:.3f}") + + # save results + print(f" - Saving {output_fpath}...", end='') + niiobj = nib.Nifti1Image(img, affine) + niiobj.set_qform(affine, 1) + niiobj.set_sform(affine, 0) + niiobj.header.set_slope_inter(slope, inter) + niiobj.header['pixdim'][4] = scan_time_per_vol + niiobj.to_filename(output_fpath) + print(f"success") + + _caches.append(recon_f.name) + if args.clear_cache: + print("\n++ Clear cache for --clear-cache") + for f in _caches: + os.remove(f) + else: + cache_fpath = f'{prefix}_cache.log' + print(f"\n++ Saving cache file: {cache_fpath}") + with open(cache_fpath, 'w+t') as log_f: + for f in _caches: + log_f.write(f + '\n') + +if __name__ == "__main__": + main() \ No newline at end of file From 075b5fec310c4bde909246979f024441917cd7c2 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Mon, 29 Apr 2024 20:57:30 -0400 Subject: [PATCH 13/14] [remove] Isolated plugin module to separate repository 'brkraw-plugin' --- plugin/sordino/config.yaml | 50 ----- plugin/sordino/sordino.py | 384 ---------------------------------- plugin/sordino/sordino2nii.py | 380 --------------------------------- 3 files changed, 814 deletions(-) delete mode 100644 plugin/sordino/config.yaml delete mode 100644 plugin/sordino/sordino.py delete mode 100644 plugin/sordino/sordino2nii.py diff --git a/plugin/sordino/config.yaml b/plugin/sordino/config.yaml deleted file mode 100644 index 18cf2eb..0000000 --- a/plugin/sordino/config.yaml +++ /dev/null @@ -1,50 +0,0 @@ -name: sordino - -version: 24.4.27 - -authors: -- Sung-Ho Lee (shlee@unc.edu) - -license: MIT -description: "This plugin converts SORDINO-MRI (acquired in radial trajectory) to NIfTI format." -documentation_url: "https://github.com/sordino" - -app: - target: tonifti - src: sordino.py::Sordino - -dependencies: -- numpy: ">=1.18" -- scipy: ">=1.11.1" -- sigpy: ">=0.1.26" - -script: - name: "sordino" - help: "plugin function for SORDINO-MRI reconstruction" - arguments: - - name: "-e" - long_name: "--extention" - help: "Extension factors for FOV regridding (LR AP SI)" - nargs: "+" - type: "list, int" - default: [1,1,1] - - name: "--offset" - help: "Index of offset frame (start point of reconstruction)" - type: "int" - default: null - - name: "--num-frames" - help: "Specify number of frames from offset." - type: "int" - default: null - - name: "--tmpdir" - help: "Specify number of frames from offset." - type: "int" - default: null - - name: "--spoketiming" - help: "apply spoke timing correction." - action: "store_true" - default: false - - name: "--mem-limit" - help: "set limit of memory size when loading data (in GB)." 
-    type: "float"
-    default: 0.5
diff --git a/plugin/sordino/sordino.py b/plugin/sordino/sordino.py
deleted file mode 100644
index 83358d8..0000000
--- a/plugin/sordino/sordino.py
+++ /dev/null
@@ -1,384 +0,0 @@
-from __future__ import annotations
-import os
-import sys
-import warnings
-import tempfile
-import numpy as np
-import sigpy as sp
-from pathlib import Path
-from tqdm import tqdm
-from scipy.interpolate import interp1d
-from brkraw.app.tonifti import BasePlugin
-from brkraw.app.tonifti.base import ScaleMode
-from typing import TYPE_CHECKING
-if TYPE_CHECKING:
-    from nibabel.nifti1 import Nifti1Header
-    from typing import List, Union, Optional, Tuple
-    from brkraw.app.tonifti import PvScan, PvFiles
-    from io import BufferedReader, BufferedWriter
-    from zipfile import ZipExtFile
-
-class Sordino(BasePlugin):
-    """ SORDINO: Plugin to Convert Bruker's SORDINO Images to NifTi File for ToNifti App in Brkraw
-    ParaVision Compatibility: > 360.x
-    """
-    _recon_dtype: Optional[np.dtype] = None
-
-    def __init__(self, pvobj: Union['PvScan', 'PvFiles'],
-                 ext_factors: Optional[List[float]]=None,
-                 offset: Optional[int]=None,
-                 num_frames: Optional[int]=None,
-                 spoketiming: Optional[bool]=False,
-                 mem_limit: Optional[float]=None,
-                 tmpdir: Optional[Path]=None,
-                 nufft: Optional[str]='sigpy',
-                 scale_mode: Optional[ScaleMode]=None,
-                 **kwargs
-                 ) -> None:
-        super().__init__(pvobj, **kwargs)
-        self._inspect()
-        self._set_params()
-        self._set_cache(tmpdir)
-        self.ext_factors: List[float] = ext_factors or [1,1,1]
-        self.offset: int = offset or 0
-        self.spoketiming: bool = spoketiming
-        self.mem_limit: Optional[float] = mem_limit
-        self.num_frames: Optional[int] = num_frames or self._num_frames
-        self.nufft: str = nufft
-        self.scale_mode: ScaleMode = scale_mode or ScaleMode.HEADER
-        self.slope, self.inter = 1, 0
-
-    def get_dataobj(self) -> np.ndarray:
-        self._set_trajectory()
-        if self.spoketiming and self.num_frames > 1:
-            bufferobj, buffer_size = self._fid_correct_spoketiming()
-            dataobj = self._recon_fid(bufferobj, buffer_size)
-        else:
-            dataobj = self._recon_fid()
-        dataobj = np.abs(dataobj) # magnitude image only
-        dataobj = self._dataobj_correct_orientation(dataobj)
-        if self.scale_mode == ScaleMode.HEADER:
-            self._calc_slope_inter(dataobj)
-            dataobj = self._dataobj_rescale_to_uint16(dataobj)
-        return dataobj
-
-    def get_affine(self, reco_id:Optional[int]=None,
-                   subj_type:Optional[str]=None,
-                   subj_position:Optional[str]=None) -> np.ndarray:
-        return super().get_affine(reco_id=reco_id,
-                                  subj_type=subj_type,
-                                  subj_position=subj_position)
-
-    def get_nifti1header(self) -> Nifti1Header:
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore")
-            header = super().get_nifti1header()
-        if self.scale_mode == ScaleMode.HEADER:
-            header.set_slope_inter(self.slope, self.inter)
-        return header
-
-    def _calc_slope_inter(self, dataobj: np.ndarray):
-        dmax = np.max(dataobj)
-        self.inter = np.min(dataobj)
-        self.slope = (dmax - self.inter) / 2**16
-
-    def _dataobj_rescale_to_uint16(self, dataobj: np.ndarray) -> np.ndarray:
-        if self.verbose:
-            print(f" + convert dtype to UINT16")
-            print(f"  - Slope: {self.slope:.3f}")
-            print(f"  - Intercept: {self.inter:.3f}")
-        if dataobj.ndim > 3:
-            converted_dataobj = []
-            for t in tqdm(range(dataobj.shape[-1]), desc=' - Frame', file=sys.stdout, ncols=80):
-                converted_dataobj.append(((dataobj[..., t] - self.inter) / self.slope).round().astype(np.uint16)[..., np.newaxis])
-            converted_dataobj = np.concatenate(converted_dataobj, axis=-1)
-        else:
-            converted_dataobj = ((dataobj - self.inter) / self.slope).round().astype(np.uint16)
-        return converted_dataobj
-
-    def _set_cache(self, tmpdir: Optional[Path]) -> None:
-        self.tmpdir: Path = tmpdir or Path.home() / '.tmp'
-        self.tmpdir.mkdir(exist_ok=True)
-
-    def _inspect(self) -> None:
-        """
-        Inspect the attached pvobj to ensure it is compatible with this plugin.
-
-        Note:
-            The pvobj provided at construction time (`self.pvobj`) is the object validated.
-
-        Raises:
-            NotImplementedError: If the pvobj's type or attributes do not meet the plugin's requirements.
-
-        This method checks for the necessary attributes in the pvobj required for SORDINO reconstruction.
-        It also validates that the pvobj is not an instance of 'PvReco', as support for this type is not implemented.
-        """
-        if self.verbose:
-            print("++ Inspecting compatibility of imported pvobj for SORDINO reconstruction...")
-        if self.pvobj.isinstance('PvReco'):
-            raise NotImplementedError("Support for 'PvReco' is not implemented.")
-        required_fields = ['method', 'acqp', 'traj', 'rawdata.job0']
-        missing_fields = [field for field in required_fields if not hasattr(self.pvobj, field)]
-        if missing_fields:
-            error_message = f"Input pvobj is missing required fields: {', '.join(missing_fields)}"
-            raise NotImplementedError(error_message)
-        if not hasattr(self.info, 'orientation'):
-            warnings.warn("Input object is missing the 'orientation' attribute. This is not critical, but the orientation of the reconstructed image might be incorrect.", UserWarning)
-
-    def _get_fid(self) -> Union[BufferedReader, ZipExtFile]:
-        return self.pvobj.get_fid()
-
-    def _get_traj(self) -> Union[BufferedReader, ZipExtFile]:
-        return self.pvobj['traj']
-
-    def _set_params(self) -> None:
-        # acqp params
-        if self.verbose:
-            print("\n++ Fetch parameters from PvObj...")
-        acqp = self.pvobj.acqp
-        nr = acqp['NR']
-        ni = acqp['NI']
-        nae = acqp['NAE']
-        ns = acqp['NSLICES']
-        acq_jobs = acqp['ACQ_jobs'][0]
-
-        self.repetition_time = acqp['ACQ_repetition_time'] / 1000
-        self.fid_shape = np.array([2, acq_jobs[0]/2, acq_jobs[3]/(nr*nae), acq_jobs[-2], ns]).astype(int).tolist()
-        self._num_frames = int(nr * ni / ns)
-        self.dtype_code = self.info.fid['dtype']
-        self.buffer_size = np.prod(self.fid_shape) * self.dtype_code.itemsize
-
-        # method
-        method = self.pvobj.method
-        npro = method['NPro']
-        mat_size = method['PVM_Matrix']
-        over_samp = method['OverSampling']
-
-        self.mat_size = np.array(mat_size)
-        self.num_pts = int((mat_size[0] * over_samp) / 2)
-        self.smp_idx = np.arange(0, int(mat_size[2] / 2))
-        self.smp_idx_oversmp = np.array([c + s for s in self.smp_idx \
-            for c in np.linspace(0, 1 - 1 / over_samp, over_samp)])
-        self.traj_shape = np.array([3, mat_size[0]/2, npro]).astype(int)
-
-    def _set_trajectory(self) -> None:
-        with self._get_traj() as f:
-            self.traj = np.frombuffer(f.read(), np.double).reshape(self.traj_shape, order='F')
-        self._traj_apply_oversampling()
-        self._traj_correct_readout_direction()
-        self._traj_correct_projection_order()
-        if any(np.array(self.ext_factors) != 1):
-            if self.verbose:
-                ef = map('*'.join, list(zip(['x', 'y', 'z'], map(str, self.ext_factors))))
-                print(f" + Extension factors: {', '.join(list(ef))}")
-            self._traj_apply_extension_factors()
-
-    def _traj_apply_oversampling(self) -> None:
-        traj = np.zeros((3, self.num_pts, self.fid_shape[2]))
-        for i, coord in enumerate(self.traj):
-            step = np.mean(np.diff(coord, 1, axis=0), axis=0)
-            coord = np.insert(coord, -1, coord[-1, :], axis=0)
-            coord[-1, :] = coord[-2, :] + step
-            func = interp1d(np.append(self.smp_idx, self.smp_idx[-1]+1), coord, axis=0)
-            # Evaluating trajectory at finer intervals
-            traj[i, :, :] = func(self.smp_idx_oversmp)
-        self.traj = traj
-
-    def _traj_correct_readout_direction(self) -> None:
-        self.traj[:, :, ::2] = -self.traj[:, :, 1::2]
-        self.traj = np.multiply(self.traj, self.mat_size[:, np.newaxis, np.newaxis]
-                                .repeat(self.traj.shape[1], 1)
-                                .repeat(self.traj.shape[2], 2))
-
-    def _traj_correct_projection_order(self) -> None:
-        proj_order = np.concatenate([np.arange(0, self.fid_shape[2], 2),
-                                     np.arange(1, self.fid_shape[2], 2)])
-        self.traj = self.traj[:, :, proj_order]
-
-    def _traj_apply_extension_factors(self):
-        for i, ef in enumerate(self.ext_factors):
-            self.traj[i] *= ef
-
-    def _dataobj_correct_orientation(self, dataobj) -> np.ndarray:
-        """Correct the subject orientation to match that of the online reconstructed image.
-        Note: This is experimental and has only been tested on a small number of cases.
-        Subject to updates and revisions."""
-        # logical to physical orientation correction
-        grad_mat = self.pvobj.acqp['ACQ_GradientMatrix'][0]
-        corrected_dataobj = self._rotate_dataobj(dataobj, grad_mat)
-        if hasattr(self.info, 'orientation'):
-            visu_ori = self.info.orientation['orientation']
-            corrected_dataobj = self._rotate_dataobj(corrected_dataobj, visu_ori)
-        return corrected_dataobj
-
-    @staticmethod
-    def _rotate_dataobj(dataobj: np.ndarray, rotation_matrix) -> np.ndarray:
-        axis_order = np.nonzero(rotation_matrix.T)[1].tolist()
-        if len(dataobj.shape) > 3:
-            axis_order += [3]
-        corrected_dataobj = np.transpose(dataobj, axis_order)
-        x, y, z = rotation_matrix.sum(0)
-        return corrected_dataobj[::x, ::y, ::z, ...]
-
-    def _recon_fid(self, filepath=None, buffer_size=None) -> np.ndarray:
-        buffer_size = buffer_size or self.buffer_size
-        output_shape = np.round(self.mat_size * self.ext_factors, decimals=0).tolist()
-        buffer_offset = 0 if filepath else self.offset * buffer_size
-        if self.verbose:
-            print("\n++ Reconstruction (FID -> Image[complex])")
-            print(f" + Output shape: {'x'.join(map(str, output_shape))}")
-            ndim = 4 if self.num_frames > 1 else 3
-            print(f" + Output dim: {ndim}")
-            if self.offset:
-                print(f" + Frame offset: {self.offset}")
-            if ndim == 4:
-                print(f" + Output num of frames: {self.num_frames}")
-        with tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir=self.tmpdir) as f_recon:
-            if filepath:
-                with open(filepath, 'rb') as f_fid:
-                    self._recon_process(f_fid, f_recon, buffer_offset, output_shape)
-            else:
-                with self._get_fid() as f_fid:
-                    self._recon_process(f_fid, f_recon, buffer_offset, output_shape)
-            if self.verbose:
-                print(f" + Converting dtype (complex -> float32)...", end='')
-            shape = output_shape + [self.num_frames] if self.num_frames > 1 else output_shape
-            f_recon.seek(0)
-            recon = np.frombuffer(f_recon.read(), dtype=self._recon_dtype).reshape(shape, order='F')
-            if self.verbose:
-                print('Success')
-            self._buffers.append(f_recon)
-            return recon
-
-    def _recon_process(self,
-                       fid_buffer: Union['BufferedReader', 'ZipExtFile'],
-                       recon_buffer: 'BufferedWriter',
-                       buffer_offset: int,
-                       output_shape: list) -> None:
-        fid_buffer.seek(buffer_offset)
-        if self.verbose:
-            framerange = tqdm(range(self.num_frames), desc=' - Frames', file=sys.stdout, ncols=80)
-        else:
-            framerange = range(self.num_frames)
-        for n in framerange:
-            buffer = fid_buffer.read(self.buffer_size)
-            fid = np.frombuffer(buffer, dtype=self.dtype_code).reshape(self.fid_shape, order='F')
-            fid = (fid[0] + 1j*fid[1])[np.newaxis, ...]
- volume = self._apply_nufft(fid, output_shape) - if n == 0: - self._recon_dtype = volume.dtype - recon_buffer.write(volume.T.flatten(order='C').tobytes()) - - def _apply_nufft(self, fid: np.ndarray, output_shape: list) -> np.ndarray: - if self.nufft == 'sigpy': - return self._sigpy_nufft(fid, output_shape) - else: - raise NotImplementedError - - def _sigpy_nufft(self, fid: np.ndarray, output_shape: list) -> np.ndarray: - dcf = np.square(self.traj).sum(0).T - return sp.nufft_adjoint(fid.squeeze().T * dcf, self.traj.T, oshape=output_shape) - - def _fid_correct_spoketiming(self) -> Tuple[str, int]: - # parameters for spoke timing correction - num_spokes = self.fid_shape[2] - - if self.verbose: - print("\n++ Running spoke timing correction") - - with tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir=self.tmpdir) as stc_f: - with self._get_fid() as fid_f: - file_size = self._fid_get_filesize(fid_f) - segs = self._fid_split_by_filesize(file_size, num_spokes) - stc_buffer_size = self._fid_process_spoketiming(segs, fid_f, stc_f, num_spokes) - self._buffers.append(stc_f) - return stc_f.name, stc_buffer_size - - def _fid_get_filesize(self, fid_buffer: Union['BufferedReader', 'ZipExtFile']) -> float: - if self.pvobj.is_compressed: - fid_fname = os.path.basename(fid_buffer.name) - fid_idx = [i for i, f in enumerate(self.pvobj._contents['files']) if fid_fname in f].pop() - file_size = self.pvobj._contents['file_sizes'][fid_idx] - else: - file_size = os.fstat(fid_buffer.fileno()).st_size - return ((file_size / self._num_frames) * self.num_frames) / 1024**3 # in GB - - def _fid_split_by_filesize(self, file_size: float, num_spokes: int) -> List[int]: - num_segs = np.ceil(file_size / self.mem_limit).astype(int) if self.mem_limit else 1 - if self.verbose: - print(f' + Size: {file_size:.3f} GB') - print(f' + Split data into {num_segs} segments for saving memory.') - - num_spokes_per_seg = int(np.ceil(num_spokes / num_segs)) if num_segs > 1 else num_spokes - if residual_spokes := num_spokes % num_spokes_per_seg: - segs = [num_spokes_per_seg for _ in range(num_segs -1)] + [residual_spokes] - else: - segs = [num_spokes_per_seg for _ in range(num_segs)] - return segs - - def _fid_process_spoketiming(self, segs: list[int], - fid_buffer: Union['BufferedReader', 'ZipExtFile'], - img_buffer: 'BufferedWriter', - num_spokes: int) -> int: - num_echos = self.fid_shape[1] - recon_buffer_offset = self.offset * self.buffer_size - - spoke_loc = 0 - stc_dtype = None - stc_buffer_size = None - - if self.verbose: - segrange = tqdm(segs, desc=' - Segments', file=sys.stdout, ncols=80) - else: - segrange = segs - for seg_size in segrange: - # load data - spoke_buffer_size = int(self.buffer_size/num_spokes) - spoke_offset = spoke_loc * spoke_buffer_size - # total buffer size for current segment - seg_buffer_size = spoke_buffer_size * seg_size - seg = [] - for t in range(self.num_frames): - frame_offset = t * self.buffer_size - fid_buffer.seek(recon_buffer_offset + frame_offset + spoke_offset) - seg.append(fid_buffer.read(seg_buffer_size)) - seg_shape = [2, num_echos, seg_size, self.num_frames] - seg_data = np.frombuffer(b''.join(seg), dtype=self.dtype_code).reshape(seg_shape, order='F') - # Spoke timing correction - corrected_seg_data = self._fid_interpolate_segment(seg_data, seg_size, num_echos, - num_spokes, spoke_loc) - - # Store data - for t in range(self.num_frames): - frame_offset = t * self.buffer_size - img_buffer.seek(frame_offset + spoke_offset) - img_buffer.write(corrected_seg_data[:,:,:, 
t].flatten(order='F').tobytes())
-
-            if not stc_dtype:
-                stc_dtype = corrected_seg_data.dtype
-                stc_buffer_size = np.prod(self.fid_shape) * stc_dtype.itemsize
-            spoke_loc += seg_size
-        return stc_buffer_size
-
-    def _fid_interpolate_segment(self,
-                                 seg_data: np.ndarray,
-                                 seg_size: int,
-                                 num_echos: int,
-                                 num_spokes: int,
-                                 spoke_loc: int) -> np.ndarray:
-        scan_time_per_vol = num_spokes * self.repetition_time
-        target_timing = scan_time_per_vol / 2
-        base_timestamps = np.arange(self.num_frames) * scan_time_per_vol
-        target_timestamps = base_timestamps + target_timing
-        corrected_seg_data = np.empty_like(seg_data)
-        for spoke_id in range(seg_size):
-            cur_spoke = spoke_loc + spoke_id
-            ref_timestamps = base_timestamps + (cur_spoke * self.repetition_time)
-            for c in range(2): # real and imaginary (complex)
-                for e in range(num_echos):
-                    interp_func = interp1d(ref_timestamps,
-                                           seg_data[c, e, spoke_id, :],
-                                           kind='linear',
-                                           fill_value='extrapolate')
-                    corrected_seg_data[c, e, spoke_id, :] = interp_func(target_timestamps)
-        return corrected_seg_data
diff --git a/plugin/sordino/sordino2nii.py b/plugin/sordino/sordino2nii.py
deleted file mode 100644
index da50ac3..0000000
--- a/plugin/sordino/sordino2nii.py
+++ /dev/null
@@ -1,380 +0,0 @@
-#!/usr/bin/env python3
-
-import argparse
-import os
-import sys
-import tempfile
-import brkraw as brk
-from typing import Tuple, List, Optional, Any
-import sigpy as sp
-import nibabel as nib
-import numpy as np
-from numpy.fft import fftn, ifftn, fftshift, ifftshift
-from scipy.signal import get_window
-from scipy.interpolate import interp1d
-from tqdm import tqdm
-
-# Dependencies: brkraw, sigpy, nibabel, numpy, scipy, and tqdm
-# tested on Python 3.8, 3.10
-# Author: SungHo Lee(shlee@unc.edu)
-
-## Brk-bart
-def parse_acqp(rawobj, scan_id):
-    # acqp parameter parsing
-    acqp = rawobj.get_acqp(scan_id).parameters
-    nr = acqp['NR']
-    ni = acqp['NI']
-    nae = acqp['NAE'] # number of averages
-    ns = acqp['NSLICES']
-    acq_jobs = acqp['ACQ_jobs'][0]
-    sz = [acq_jobs[0]/2, acq_jobs[3]/(nr*nae), acq_jobs[-2]]
-
-    wordtype = brk.lib.reference.WORDTYPE[f'_{"".join(acqp["ACQ_word_size"].split("_"))}_SGN_INT']
-    byteorder = brk.lib.reference.BYTEORDER[f'{acqp["BYTORDA"]}Endian']
-
-    tr = acqp['ACQ_repetition_time'] / 1000
-    dtype_code = np.dtype(f'{byteorder}{wordtype}')
-    fid_shape = np.array([2] + sz + [ns]).astype(int).tolist()
-    num_frames = int(nr*ni/ns)
-    buffer_size = np.prod(fid_shape)*dtype_code.itemsize
-
-    return dict(fid_shape=fid_shape,
-                dtype_code=dtype_code,
-                num_frames=num_frames,
-                buffer_size=buffer_size,
-                repetition_time=tr)
-
-
-def get_traj(rawobj, scan_id):
-    method = rawobj.get_method(scan_id).parameters
-    traj_shape = np.array(
-        [3, method['PVM_Matrix'][0]/2, method['NPro']]).astype(int)
-    return np.frombuffer(rawobj._pvobj.get_traj(scan_id), np.double).reshape(traj_shape, order='F')
-
-
-def calc_trajectory(rawobj, scan_id, params, ext_factors):
-    method = rawobj.get_method(scan_id).parameters
-    over_samp = method['OverSampling']
-    mat_size = np.array(method['PVM_Matrix'])
-
-    num_pts = int((mat_size[0] * over_samp) / 2)
-    smp_idx = np.arange(0, int(mat_size[0] / 2))
-    smp_idx_oversmp = np.array(
-        [c+s for s in smp_idx for c in np.linspace(0, 1-1/over_samp, over_samp)])
-
-    # trajectory
-    traj = get_traj(rawobj, scan_id)
-
-    # calculate trajectory oversampling
-    # axis2 = number of projection
-    traj_oversmp = np.zeros((3, num_pts, params['fid_shape'][2]))
-    for i, coord in enumerate(traj):
-
-        step = np.mean(np.diff(coord, 1, axis=0), axis=0)
-
-        coord = np.insert(coord, -1, coord[-1, :], axis=0)
-        coord[-1, :] = coord[-2, :] + step
-        func = interp1d(np.append(smp_idx, smp_idx[-1]+1), coord, axis=0)
-
-        # Evaluating trajectory at finer intervals
-        traj_oversmp[i, :, :] = func(smp_idx_oversmp)
-
-    traj_oversmp[:, :, ::2] = -traj_oversmp[:, :, 1::2] # correct direction
-
-    traj_oversmp = np.multiply(traj_oversmp, mat_size[:, np.newaxis, np.newaxis]
-                               .repeat(traj_oversmp.shape[1], 1)
-                               .repeat(traj_oversmp.shape[2], 2))
-
-    proj_order = np.concatenate([np.arange(0, params['fid_shape'][2], 2),
-                                 np.arange(1, params['fid_shape'][2], 2)])
-
-    # apply extend FOV factor
-    traj_adjusted = traj_oversmp[:, :, proj_order]
-    traj_adjusted[0] *= ext_factors[0]
-    traj_adjusted[1] *= ext_factors[1]
-    traj_adjusted[2] *= ext_factors[2]
-    return traj_adjusted
-
-
-def validate_int_or_list(value):
-    try:
-        return int(value)
-    except ValueError:
-        raise argparse.ArgumentTypeError("Extension factors must be integers.")
-
-__version__ = '24.1.01'
-
-def main():
-    parser = argparse.ArgumentParser(prog="sordino2nii", description='Reconstruction tool for SORDINO fMRI sequence')
-
-    parser.add_argument("-i", "--input", help="input path of raw data", type=str, default=None)
-    parser.add_argument("-o", "--prefix", help='output prefix of reconstructed image', type=str, default='output')
-    parser.add_argument("-s", "--scanid", help="scan id", type=int, default=None)
-    parser.add_argument("-e", "--extention", help="Extension factors for FOV regridding (LR AP SI)", nargs="+",
-                        type=validate_int_or_list, default=[1,1,1])
-    parser.add_argument("--offset", help="Index of offset frame (start point of reconstruction)", type=int, default=0)
-    parser.add_argument("--num-frames", help='Number of frames to be reconstructed (from offset)', type=int, default=None)
-    parser.add_argument("--tmpdir", help="folder to store temporary file", type=str, default=None)
-    parser.add_argument("--spoketiming", help="apply spoke timing correction", action='store_true')
-    parser.add_argument("--clear-cache", help='delete intermediate binary files generated during reconstruction', action='store_true')
-    parser.add_argument("--mem-limit", help='set limit of memory size when loading data (in GB)', type=float, default=0.5)
-
-    print(f"++ sordino2nii(v{__version__}): Reconstruction tool for SORDINO fMRI sequence")
-    print("++ Authored by: SungHo Lee (email: shlee@unc.edu)")
-
-    args = parser.parse_args()
-    tqdm_ncols = 100
-    # fetch variables
-    mem_limit = args.mem_limit
-    scan_id = args.scanid
-    offset = args.offset
-    ext_factor = args.extention * 3 if len(args.extention) == 1 else args.extention
-    # gamma = args.gamma[0] if len(args.gamma) == 1 else args.gamma
-    prefix = args.prefix
-
-    # create temporary folder
-    tmpdir = args.tmpdir or os.path.join(os.curdir, '.tmp')
-    os.makedirs(tmpdir, exist_ok=True)
-
-    # loading data
-    try:
-        print(f"\n++ Loading input Bruker rawdata {args.input}")
-        raw = brk.load(args.input)
-        params = parse_acqp(raw, scan_id)
-        pvobj = raw._pvobj
-        num_frames = args.num_frames or params['num_frames']
-    except:
-        parser.print_help()
-        sys.exit()
-
-    # parameters for reconstruction
-    print("\n++ Fetch parameters from rawdata")
-    num_echos, num_spokes = params['fid_shape'][1:3]
-    raw_buffer_size = params['buffer_size']
-    raw_dtype = params['dtype_code']
-    recon_buffer_offset = offset * raw_buffer_size
-    print(f" + Extension factors: x*{ext_factor[0]}, y*{ext_factor[1]}, z*{ext_factor[2]}")
-    ext_factor = [ext_factor[1], ext_factor[0], ext_factor[2]]
-    ## calculate trajectory
-    visu_pars = raw.get_visu_pars(scan_id, 1).parameters
-    traj = calc_trajectory(raw, scan_id, params, ext_factor)
-    coord = traj.T
-    dcf = coord[..., 0] ** 2 + coord[..., 1] ** 2 + coord[..., 2] ** 2
-    oshape = (np.array(visu_pars['VisuCoreSize']) * ext_factor).tolist()
-    print(f" + Output shape: {oshape[1]}x{oshape[0]}x{oshape[2]}")
-    ndim = 4 if num_frames > 1 else 3
-    print(f" + Output dim: {ndim}")
-    if offset:
-        print(f" + Frame offset: {offset}")
-    if ndim == 4:
-        print(f" + Output num of frames: {num_frames}")
-
-    # Image reconstruction
-    _caches = []
-    with tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir=tmpdir) as recon_f:
-        recon_dtype = None
-        tr = params['repetition_time']
-        scan_time_per_vol = num_spokes * tr
-        # Spoke timing correction
-        if args.spoketiming:
-            print("\n++ Running spoke timing correction for --spoketiming")
-            ## Run spoke timing correction
-            # parameters for spoke timing correction
-
-            target_timing = scan_time_per_vol / 2
-            base_timestamps = np.arange(num_frames) * scan_time_per_vol
-            target_timestamps = base_timestamps + target_timing
-            spoke_buffer_size = int(raw_buffer_size/num_spokes)
-
-            with tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir=tmpdir) as stc_f:
-                with pvobj._open_object(pvobj._fid[scan_id]) as fid_f:
-                    # print the size of the file in GB
-                    try:
-                        file_size = pvobj.filelist[pvobj._fid[scan_id]].file_size / params['num_frames'] * num_frames / 1024**3
-                    except:
-                        file_size = os.path.getsize(pvobj._fid[scan_id]) / params['num_frames'] * num_frames / 1024**3
-                    print(f' + Size: {file_size:.3f} GB')
-
-                    # for safety reasons, cut data into the size defined at limit_mem_size (in GB)
-                    num_segs = np.ceil(file_size / mem_limit).astype(int)
-                    print(f' + Split data into {num_segs} segments for saving memory.')
-
-                    num_spokes_per_seg = int(np.ceil(num_spokes / num_segs))
-                    if residual_spokes := num_spokes % num_spokes_per_seg:
-                        segs = [num_spokes_per_seg for _ in range(num_segs -1)] + [residual_spokes]
-                    else:
-                        segs = [num_spokes_per_seg for _ in range(num_segs)]
-
-                    spoke_loc = 0
-                    stc_dtype = None
-                    stc_buffer_size = None
-                    for seg_size in tqdm(segs, desc=' - Segments', file=sys.stdout, ncols=tqdm_ncols):
-                        # load data
-                        spoke_offset = spoke_loc * spoke_buffer_size
-                        seg_buffer_size = spoke_buffer_size * seg_size # total buffer size for current segment
-                        seg = []
-                        for t in range(num_frames):
-                            frame_offset = t * raw_buffer_size
-                            fid_f.seek(recon_buffer_offset + frame_offset + spoke_offset)
-                            seg.append(fid_f.read(seg_buffer_size))
-                        seg_data = np.frombuffer(b''.join(seg), dtype=raw_dtype).reshape([2, num_echos,
-                                                                                          seg_size, num_frames],
-                                                                                         order='F')
-                        # Spoke timing correction
-                        corrected_seg_data = np.empty_like(seg_data)
-                        for spoke_id in range(seg_size):
-                            cur_spoke = spoke_loc + spoke_id
-                            ref_timestamps = base_timestamps + (cur_spoke * tr)
-                            for c in range(2): # real and imaginary (complex)
-                                for e in range(num_echos):
-                                    interp_func = interp1d(ref_timestamps,
-                                                           seg_data[c, e, spoke_id, :],
-                                                           kind='linear',
-                                                           fill_value='extrapolate')
-                                    corrected_seg_data[c, e, spoke_id, :] = interp_func(target_timestamps)
-
-
-                        # Store data
-                        for t in range(num_frames):
-                            frame_offset = t * raw_buffer_size
-                            stc_f.seek(frame_offset + spoke_offset)
-                            stc_f.write(corrected_seg_data[:,:,:, t].flatten(order='F').tobytes())
-
-                        if not stc_dtype:
-                            stc_dtype = corrected_seg_data.dtype
-                            stc_buffer_size = np.prod(params['fid_shape']) * stc_dtype.itemsize
-
-                        spoke_loc += seg_size
-            print(' + Success')
-            # clear memory (we only needs stc_dtype, stc_buffer_size)
stc_f, stc_dtype, stc_buffer_size) - ## spoke timing prep related namespaces - del tr, target_timing, - del base_timestamps, target_timestamps, spoke_buffer_size - ## segmenting related namespaces - del file_size, mem_limit, num_segs, - del num_spokes_per_seg, residual_spokes, segs, spoke_loc, - del seg_buffer_size, seg_size, seg_data, num_echos - ## spoke timing correction relaated - del cur_spoke, spoke_id, ref_timestamps, interp_func - del frame_offset, corrected_seg_data - del fid_f - - print("\n++ Reconstruction (FID -> Image[complex])") - with open(stc_f.name, 'r+b') as fid_f: - # Reconstruction - fid_f.seek(0) - recon_f.seek(0) - for n in tqdm(range(num_frames), desc=' - Frames', file=sys.stdout, ncols=tqdm_ncols): - buffer = fid_f.read(stc_buffer_size) - v = np.frombuffer(buffer, dtype=stc_dtype).reshape(params['fid_shape'], order='F') - v = (v[0] + 1j*v[1])[np.newaxis, ...] - ksp = v.squeeze().T - recon_vol = sp.nufft_adjoint(ksp * dcf, coord, oshape=oshape) - if n == 0: - recon_dtype = recon_vol.dtype - recon_f.write(recon_vol.T.flatten(order='C').tobytes()) - _caches.append(stc_f.name) - print(" + Success") - - # Without spoke timing correction - else: - print("\n++ Reconstruction (FID -> Image[complex])") - with pvobj._open_object(pvobj._fid[scan_id]) as fid_f: - fid_f.seek(recon_buffer_offset) - recon_f.seek(0) - for n in tqdm(range(num_frames), desc=' - Frames', file=sys.stdout, ncols=tqdm_ncols): - buffer = fid_f.read(raw_buffer_size) - v = np.frombuffer(buffer, dtype=raw_dtype).reshape(params['fid_shape'], order='F') - v = (v[0] + 1j*v[1])[np.newaxis, ...] - ksp = v.squeeze().T - recon_vol = sp.nufft_adjoint(ksp * dcf, coord, oshape=oshape) - if n == 0: - recon_dtype = recon_vol.dtype - recon_f.write(recon_vol.T.flatten(order='C').tobytes()) - print(" + Success") - - def calc_slope_inter(data): - inter = np.min(data) - dmax = np.max(data) - slope = (dmax - inter) / 2**16 - - if data.ndim > 3: - converted_data = [] - for t in tqdm(range(data.shape[-1]), desc=' - Frame', file=sys.stdout, ncols=tqdm_ncols): - converted_data.append(((data[..., t] - inter) / slope).round().astype(np.uint16)[..., np.newaxis]) - converted_data = np.concatenate(converted_data, axis=-1) - else: - converted_data = ((data - inter) / slope).round().astype(np.uint16) - return converted_data, slope, inter - - # Save to NIFTI file - with open(recon_f.name, 'r+b') as img_f: - print(f" + Converting dtype (complex -> float32)...", end='') - img = np.abs(np.frombuffer(img_f.read(), dtype=recon_dtype).reshape(oshape + [num_frames], order='F')) - print('success') - - - print(f"\n++ Creating Nifti image") - output_fpath = f'{prefix}.nii.gz' - print(f" + convert dtype to UINT16") - img, slope, inter = calc_slope_inter(img) - img = img.squeeze() - print(f" - Slope: {slope:.3f}") - print(f" - Intercept: {inter:.3f}") - print(f" - Min: {img.min()}") - print(f" - Max: {img.max()}") - method = raw.get_method(scan_id).parameters - affine = np.array(method['PVM_SpatResol'] + [1]) - position = -1 * np.array(method['PVM_Fov']) * ext_factor / 2 - grad_matrix = np.round(np.array(method['PVM_SPackArrGradOrient'][0]).T, decimals=0).astype(np.int16) - axis_order = np.arange(img.ndim) - axis_order[:3] = tuple([int(np.squeeze(np.nonzero(ax))) for ax in grad_matrix]) - flip_axis = np.nonzero(grad_matrix.sum(0) < 0)[0].tolist() - - affine[flip_axis] *= -1 - affine = np.diag(affine) - affine[:3, 3] = position.dot(np.abs(grad_matrix.T)) - - img = img.transpose(axis_order) - img = np.flip(img, flip_axis) - - # position correction - 
-    print(f"\n + calculating affine matrix")
-    axis_order[:3] = [0, 2, 1]
-    img = img.transpose(axis_order)
-    # img = np.flip(img, 2)
-    img = np.flip(img, 1)
-    affine[:, [1, 2]] = affine[:, [2, 1]]
-    affine[[1, 2], :] = affine[[2, 1], :]
-    affine[:, 0] *= -1
-    affine[2, 3] *= -1
-    # affine[:, 2] *= -1
-    print(" - rotation:")
-    for i in range(3):
-        print(f"    {affine[i, 0]}, {affine[i, 1]}, {affine[i, 2]}")
-    print(f" - position(LPS): {affine[0,3]:.3f}, {affine[1,3]:.3f}, {affine[2,3]:.3f}")
-
-    # save results
-    print(f" - Saving {output_fpath}...", end='')
-    niiobj = nib.Nifti1Image(img, affine)
-    niiobj.set_qform(affine, 1)
-    niiobj.set_sform(affine, 0)
-    niiobj.header.set_slope_inter(slope, inter)
-    niiobj.header['pixdim'][4] = scan_time_per_vol
-    niiobj.to_filename(output_fpath)
-    print(f"success")
-
-    _caches.append(recon_f.name)
-    if args.clear_cache:
-        print("\n++ Clear cache for --clear-cache")
-        for f in _caches:
-            os.remove(f)
-    else:
-        cache_fpath = f'{prefix}_cache.log'
-        print(f"\n++ Saving cache file: {cache_fpath}")
-        with open(cache_fpath, 'w+t') as log_f:
-            for f in _caches:
-                log_f.write(f + '\n')
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file

From 9f4120b831cdd9752979f0358b2826c13b2b3284 Mon Sep 17 00:00:00 2001
From: dvm-shlee
Date: Mon, 29 Apr 2024 21:19:27 -0400
Subject: [PATCH 14/14] Update CONTRIBUTING.md and Makefile

---
 CONTRIBUTING.md | 46 +++++++++++++++++++++++++++++++++-------------
 Makefile        |  2 +-
 2 files changed, 34 insertions(+), 14 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3fa4da1..60452e4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,17 +1,37 @@
-# Contributing
+# Contributing to BrkRaw
 
-When contributing to this repository, please first discuss the change you wish to make via issue,
-email, or any other method with the owners of this repository before making a change. 
+Thank you for your interest in contributing to BrkRaw! Whether you're tackling a bug, adding a new feature, or improving our documentation, every contribution is appreciated. This guide will help you get started and make your contributions as effective as possible.
 
-Please note we have a code of conduct, please follow it in all your interactions with the project.
+## Ways to Contribute
 
-## Pull Request Process
+### Reporting Issues
 
-1. Ensure any install or build dependencies are removed before the end of the layer when doing a
-   build.
-2. Update the README.md with details of changes to the interface, this includes new environment
-   variables, exposed ports, useful file locations and container parameters.
-3. Increase the version numbers in any examples files and the README.md to the new version that this
-   Pull Request would represent. The versioning scheme we use is [SemVer](http://semver.org/).
-4. You may merge the Pull Request in once you have the sign-off of two other developers, or if you
-   do not have permission to do that, you may request the second reviewer to merge it for you.
\ No newline at end of file
+If you encounter a bug, have a suggestion, or want to make a feature request, please use the Issues section. Include as much detail as possible and label your issue appropriately.
+
+### Pull Requests
+
+We welcome pull requests with open arms! Here's what to keep in mind for each type of contribution:
+
+- **Code Changes**: If you are updating the BrkRaw codebase, perhaps due to a ParaVision compatibility issue or to suggest a new standard, please make sure your changes are well-documented.
+- **New Features**: If you're introducing a new feature, ensure that you include appropriate test scripts in the `tests` directory, following our standard testing workflow. Check our documentation for more details. +- **New Applications**: Contributions that significantly enhance community utility but cannot be integrated via the plugin architecture should be directed to the main BrkRaw package. + +Before creating a pull request, ensure that your code complies with the existing code style and that you have tested your changes locally. + +### Contributing to Child Repositories + +- **[plugin](../brkraw-plugin)**: For new functionalities at the app level, direct your contributions here. +- **[dataset](../brkraw-dataset)**: To add a new dataset that needs to be tested via BrkRaw CI for data conversion consistency and reliability, please contribute here. +- **[tutorial](../brkraw-tutorial)**: For new tutorials, tutorial revisions, or documentation that would help other users, please contribute to this repository. + +## Before You Start + +Please review the documentation and Q&A to see if your question has already been answered or if the feature has already been discussed. If you’re unsure about adding a feature or making a change, open an issue to discuss it first. + +## Contribution Guidelines + +- Ensure your contributions are clear and easy to understand. +- Include any necessary tests and documentation updates. +- Adhere to the coding standards and best practices as outlined in our project documentation. + +We look forward to your contributions and are excited to see what you come up with! \ No newline at end of file diff --git a/Makefile b/Makefile index 30cb7df..3a2516a 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ clean: rm -rf tests/tutorials tests/tutorials: - git clone https://github.com/BrkRaw/tutorials.git tests/tutorials + git clone https://github.com/BrkRaw/brkraw-tutorial.git tests/tutorials tests/tutorials/SampleData/20190724_114946_BRKRAW_1_1: tests/tutorials unzip -uq tests/tutorials/SampleData/20190724_114946_BRKRAW_1_1.zip -d tests/tutorials/SampleData/
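The updated CONTRIBUTING.md asks that new features ship with test scripts under the `tests` directory, and the Makefile above prepares the tutorial sample dataset those scripts can run against. A minimal sketch of such a test, assuming pytest, the `brkraw.load` entry point used in the plugin scripts above, and the sample-data path unpacked by the Makefile targets (the fixture layout and assertion are illustrative, not part of this patch):

```python
# Illustrative sketch only: the sample path and API usage are assumptions
# based on the Makefile targets and the `brk.load` calls elsewhere in this repo.
from pathlib import Path

import pytest
import brkraw as brk

SAMPLE = Path('tests/tutorials/SampleData/20190724_114946_BRKRAW_1_1')

@pytest.mark.skipif(not SAMPLE.exists(),
                    reason='sample dataset not unpacked (see Makefile targets)')
def test_load_sample_dataset():
    # Loading the Bruker study should succeed and return a usable object.
    raw = brk.load(str(SAMPLE))
    assert raw is not None
```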