diff --git a/.dockerignore b/.dockerignore
index 769d3c5..2f1e449 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -6,6 +6,7 @@ env
 **/__pycache__
 **/.pytest_cache
+**/.mypy_cache
 .idea/**
@@ -16,4 +17,5 @@ env
 tests/*
 paper
-.DS_Store
\ No newline at end of file
+.DS_Store
+
diff --git a/.flake8 b/.flake8
index 43e2629..f93bc67 100644
--- a/.flake8
+++ b/.flake8
@@ -7,4 +7,7 @@ exclude = env, venv,
 max-line-length = 127
-max-complexity=10
\ No newline at end of file
+max-complexity = 10
+ignore = W291, W293
+docstring-convention = google
+mypy-config = ./mypy.ini
\ No newline at end of file
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 1511338..6d1f9c5 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -6,4 +6,4 @@ Changes proposed in this pull request:
 -
 -
 
-@BrkRaw/Bruker
+@BrkRaw/brkraw
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index dc91c2c..b6b719c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -81,7 +81,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         pip install .[dev]
-        pip install .[SimpleITK]
+        pip install .[legacy]
     - name: Install tutorial
       run: make tests/tutorials
diff --git a/.gitignore b/.gitignore
index e8825c8..daf002e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,7 +11,12 @@ build
 *.egg-info
 *.egg-info/*
 .DS_Store
+.mypy_cache
+.pytest_cache
+
+tests/.brkraw
+tests/_*.ipynb
 tests/tutorials
-_test*.py
-_*.ipynb
-_*.log
\ No newline at end of file
+tests/_datasets
+
+.python-version
\ No newline at end of file
diff --git a/README.md b/README.md
index 19a2793..b97cc6f 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,6 @@
 ## BrkRaw: A comprehensive tool to access raw Bruker Biospin MRI data
 #### Version: 0.3.11
-
 ### Description
 
 The ‘BrkRaw’ is a python module designed to provide a comprehensive tool to access raw data acquired from
diff --git a/brkraw/__init__.py b/brkraw/__init__.py
index 130a9c2..fbf5897 100644
--- a/brkraw/__init__.py
+++ b/brkraw/__init__.py
@@ -1,8 +1,13 @@
 from .lib import *
+from xnippet import XnippetManager
 
-__version__ = '0.3.11'
-__all__ = ['BrukerLoader', '__version__', 'config']
+__version__ = '0.4.0'
+config = XnippetManager(package_name=__package__,
+                        package_version=__version__,
+                        package__file__=__file__,
+                        config_filename='config.yaml')
+__all__ = ['BrukerLoader', '__version__', 'config']
 
 
 def load(path):
     return BrukerLoader(path)
diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py
index 261d4a1..5d4d0c7 100755
--- a/brkraw/api/__init__.py
+++ b/brkraw/api/__init__.py
@@ -1,4 +1,5 @@
-from .data import Study
-from ..config import ConfigManager
+from xnippet.snippet.plugin import PlugIn as PlugInSnippet
+from xnippet.formatter import PathFormatter
 
-__all__ = ['Study', 'ConfigManager']
\ No newline at end of file
+
+__all__ = ['PlugInSnippet', 'PathFormatter']
\ No newline at end of file
diff --git a/brkraw/api/analyzer/__init__.py b/brkraw/api/analyzer/__init__.py
index dce03d3..9bc47d6 100644
--- a/brkraw/api/analyzer/__init__.py
+++ b/brkraw/api/analyzer/__init__.py
@@ -1,3 +1,16 @@
+"""Analyzer module initialization.
+
+This module imports and exposes various analyzer classes used to parse and process
+information from raw datasets into more readable formats. Each analyzer provides
+specific functionalities tailored to different aspects of data processing and analysis.
+
+Exposed Classes:
+    BaseAnalyzer: Provides common features and utilities shared among all analyzers.
+    ScanInfoAnalyzer: Specializes in parsing and analyzing scan information from raw datasets.
+    AffineAnalyzer: Handles the computation and analysis of affine matrices from dataset parameters.
+    DataArrayAnalyzer: Focuses on parsing and returning structured data arrays and related metadata.
+"""
+
 from .base import BaseAnalyzer
 from .scaninfo import ScanInfoAnalyzer
 from .affine import AffineAnalyzer
diff --git a/brkraw/api/analyzer/affine.py b/brkraw/api/analyzer/affine.py
index aa1a677..c6af743 100644
--- a/brkraw/api/analyzer/affine.py
+++ b/brkraw/api/analyzer/affine.py
@@ -1,3 +1,11 @@
+"""Affine Matrix Analyzer Module.
+
+This module focuses on analyzing and processing affine matrices derived from imaging data.
+It provides functionalities to calculate, adjust, and standardize affine transformations based
+on specific imaging parameters and subject orientations, thereby facilitating accurate spatial
+orientation and alignment of imaging data.
+"""
+
 from __future__ import annotations
 from brkraw.api import helper
 from .base import BaseAnalyzer
@@ -22,7 +30,24 @@
 
 class AffineAnalyzer(BaseAnalyzer):
+    """Processes affine matrices from raw dataset parameters to ensure proper spatial orientation.
+
+    This analyzer calculates affine matrices based on imaging data and subject configurations.
+    It supports various adjustments based on subject type and pose, ensuring the matrices are
+    suitable for specific analysis and visualization requirements.
+
+    Args:
+        infoobj (ScanInfo): The information object containing imaging parameters and subject orientation.
+
+    Attributes:
+        resolution (list[tuple]): Resolution details extracted from imaging data.
+        affine (np.ndarray or list[np.ndarray]): The calculated affine matrices.
+        subj_type (str): The type of the subject (e.g., Biped, Quadruped).
+        subj_position (str): The position of the subject during the scan.
+    """
     def __init__(self, infoobj: 'ScanInfo'):
+        """Initialize the AffineAnalyzer with an information object.
+        """
         infoobj = copy(infoobj)
         if infoobj.image['dim'] == 2:
             xr, yr = infoobj.image['resolution']
@@ -43,6 +68,8 @@ def __init__(self, infoobj: 'ScanInfo'):
         self.subj_position = infoobj.orientation['subject_position'] if hasattr(infoobj, 'orientation') else None
 
     def get_affine(self, subj_type: Optional[str] = None, subj_position: Optional[str] = None):
+        """Retrieve the affine matrix, applying corrections based on subject type and position.
+        """
         subj_type = subj_type or self.subj_type
         subj_position = subj_position or self.subj_position
         if isinstance(self.affine, list):
@@ -52,6 +79,8 @@ def get_affine(self, subj_type: Optional[str] = None, subj_position: Optional[st
         return affine
 
     def _calculate_affine(self, infoobj: 'ScanInfo', slicepack_id: Optional[int] = None):
+        """Calculate the initial affine matrix based on the imaging data and subject orientation.
+        """
         sidx = infoobj.orientation['orientation_desc'][slicepack_id].index(2) \
             if slicepack_id else infoobj.orientation['orientation_desc'].index(2)
         slice_orient = SLICEORIENT[sidx]
@@ -69,12 +98,16 @@ def _calculate_affine(self, infoobj: 'ScanInfo', slicepack_id: Optional[int] = N
 
     @staticmethod
     def _correct_origin(orientation, volume_origin, slice_distance):
+        """Adjust the origin of the volume based on slice orientation and distance.
+        """
         new_origin = orientation.dot(volume_origin)
         new_origin[-1] += slice_distance
         return orientation.T.dot(new_origin)
 
     @staticmethod
     def _compose_affine(resolution, orientation, volume_origin, slice_orient):
+        """Compose the affine transformation matrix using the provided resolution, orientation, and origin.
+        """
         resol = np.array(resolution)
         if slice_orient in ['axial', 'sagital']:
             resol = np.diag(resol)
@@ -86,6 +119,8 @@ def _compose_affine(resolution, orientation, volume_origin, slice_orient):
 
     @staticmethod
     def _est_rotate_angle(subj_pose):
+        """Estimate the rotation angle needed based on the subject's pose.
+        """
         rotate_angle = {'rad_x':0, 'rad_y':0, 'rad_z':0}
         if subj_pose:
             if subj_pose == 'Head_Supine':
@@ -112,6 +147,8 @@
 
     @classmethod
     def _correct_orientation(cls, affine, subj_pose, subj_type):
+        """Correct the orientation of the affine matrix based on the subject's type and pose.
+        """
         cls._inspect_subj_info(subj_pose, subj_type)
         rotate_angle = cls._est_rotate_angle(subj_pose)
         affine = helper.rotate_affine(affine, **rotate_angle)
@@ -122,6 +159,8 @@
 
     @staticmethod
     def _inspect_subj_info(subj_pose, subj_type):
+        """Validate subject type and pose information.
+        """
         if subj_pose:
             part, side = subj_pose.split('_')
             assert part in SUBJPOSE['part'], 'Invalid subject position'
diff --git a/brkraw/api/analyzer/base.py b/brkraw/api/analyzer/base.py
index 76fa42d..b2894b6 100644
--- a/brkraw/api/analyzer/base.py
+++ b/brkraw/api/analyzer/base.py
@@ -1,3 +1,23 @@
+"""Base components for data analysis.
+
+This module provides foundational classes and utilities that are shared across different
+analyzers within the helper module. These components serve as the base for more specialized
+data processing and analysis tasks.
+"""
+
 class BaseAnalyzer:
+    """A base class providing common functionalities for data analyzers.
+
+    This class serves as a parent to various specialized analyzers, providing shared methods
+    and utility functions to assist in data analysis tasks.
+
+    Methods:
+        to_dict: Returns a dictionary representation of the instance's attributes.
+    """
     def to_dict(self):
+        """Convert the analyzer's attributes to a dictionary format.
+
+        Returns:
+            dict: A dictionary containing all attributes of the analyzer instance.
+        """
         return self.__dict__
\ No newline at end of file
diff --git a/brkraw/api/analyzer/dataarray.py b/brkraw/api/analyzer/dataarray.py
index d435c89..882286e 100644
--- a/brkraw/api/analyzer/dataarray.py
+++ b/brkraw/api/analyzer/dataarray.py
@@ -1,21 +1,49 @@
+"""Data Array Analyzer Module.
+
+This module is dedicated to the analysis of data arrays, focusing on extracting and structuring
+data array information from raw datasets. It provides functionalities to interpret and convert
+data arrays into more accessible formats, complementing the broader data processing framework.
+"""
+
 from __future__ import annotations
-from .base import BaseAnalyzer
 import numpy as np
 from copy import copy
-from typing import TYPE_CHECKING, Union
+from .base import BaseAnalyzer
+from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from ..data import ScanInfo
+    from typing import Union
     from io import BufferedReader
     from zipfile import ZipExtFile
 
 
 class DataArrayAnalyzer(BaseAnalyzer):
+    """Analyzes specific data array information and returns structured data arrays and related metadata.
+
+    This analyzer takes raw data array inputs and processes them to extract significant array metadata,
+    such as data type and shape, and prepares the data array for further analytical processing.
+
+    Args:
+        infoobj (ScanInfo): The information object containing metadata related to data arrays.
+        fileobj (Union[BufferedReader, ZipExtFile]): The file object from which the data array is read.
+
+    Attributes:
+        slope (float): The scaling factor applied to the data array values.
+        offset (float): The offset added to the data array values.
+        dtype (type): The data type of the data array.
+        shape (list[int]): The dimensions of the data array.
+        shape_desc (list[str]): Descriptions of the data array dimensions.
+    """
     def __init__(self, infoobj: 'ScanInfo', fileobj: Union[BufferedReader, ZipExtFile]):
+        """Initialize the DataArrayAnalyzer with an information object and a file object.
+        """
         infoobj = copy(infoobj)
         self._parse_info(infoobj)
         self.buffer = fileobj
 
     def _parse_info(self, infoobj: 'ScanInfo'):
+        """Parse the information object to set the data array properties such as slope, offset, and data type.
+        """
         if not hasattr(infoobj, 'dataarray'):
             raise AttributeError
         self.slope = infoobj.dataarray['slope']
@@ -27,10 +55,14 @@ def _parse_info(self, infoobj: 'ScanInfo'):
         self._calc_array_shape(infoobj)
 
     def _calc_array_shape(self, infoobj: 'ScanInfo'):
+        """Calculate and extend the shape and description of the data array based on frame group information.
+        """
         self.shape.extend(infoobj.frame_group['shape'][:])
         self.shape_desc.extend([fgid.replace('FG_', '').lower() for fgid in infoobj.frame_group['id']])
 
     def get_dataarray(self):
+        """Read and return the structured data array from the buffer, applying data type and shape transformations.
+        """
         self.buffer.seek(0)
         return np.frombuffer(self.buffer.read(), self.dtype).reshape(self.shape, order='F')
 
diff --git a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py
index 14cf84a..3560406 100644
--- a/brkraw/api/analyzer/scaninfo.py
+++ b/brkraw/api/analyzer/scaninfo.py
@@ -1,3 +1,10 @@
+"""Scan information analysis module.
+
+This module defines the ScanInfoAnalyzer, which is essential for parsing and interpreting
+metadata from multiple parameter files, making it more human-readable and accessible
+for further processing and analysis tasks.
+"""
+
 from __future__ import annotations
 from collections import OrderedDict
 from brkraw.api import helper
@@ -10,18 +17,27 @@
 
 class ScanInfoAnalyzer(BaseAnalyzer):
     """Helps parse metadata from multiple parameter files to make it more human-readable.
 
+    This analyzer is crucial for reconstructing and interpreting various scan parameters
+    from raw dataset files, supporting enhanced data insights and accessibility.
+
     Args:
-        pvobj (PvScan): The PvScan object containing acquisition and method parameters.
-        reco_id (int, optional): The reconstruction ID. Defaults to None.
+        pvobj (Union[PvScan, PvReco, PvFiles]): The PvObject containing various acquisition
+            and method parameters.
+        reco_id (int, optional): Specifies the reconstruction ID for targeted analysis.
+            Defaults to None.
+        debug (bool): Flag to enable debugging outputs for detailed tracing.
 
-    Raises:
-        NotImplementedError: If an operation is not implemented.
+    Attributes:
+        info_protocol (dict): Stores protocol-related information.
+        info_fid (dict): Contains information extracted from FID files.
+        visu_pars (OrderedDict): Visualization parameters extracted for analysis.
""" def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id:Optional[int] = None, debug:bool = False): - + """Initialize the ScanInfoAnalyzer with specified parameters and optionally in debug mode. + """ self._set_pars(pvobj, reco_id) if not debug: self.info_protocol = helper.Protocol(self).get_info() @@ -30,6 +46,7 @@ def __init__(self, self._parse_info() def _set_pars(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Optional[int]): + """Set parameters from the PvObject for internal use.""" for p in ['acqp', 'method']: try: vals = getattr(pvobj, p) @@ -49,6 +66,8 @@ def _set_pars(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Option setattr(self, 'visu_pars', visu_pars) def _parse_info(self): + """Parse and process detailed information from the visualization parameters and other sources. + """ self.info_dataarray = helper.DataArray(self).get_info() self.info_frame_group = helper.FrameGroup(self).get_info() self.info_image = helper.Image(self).get_info() @@ -59,7 +78,11 @@ def _parse_info(self): self.info_orientation = helper.Orientation(self).get_info() def __dir__(self): + """List dynamic attributes of the instance related to informational properties. + """ return [attr for attr in self.__dict__.keys() if 'info_' in attr] def get(self, key): + """Retrieve information properties based on a specified key. + """ return getattr(self, key) if key in self.__dir__() else None \ No newline at end of file diff --git a/brkraw/api/data/__init__.py b/brkraw/api/data/__init__.py index 2b9f2b4..d340e41 100644 --- a/brkraw/api/data/__init__.py +++ b/brkraw/api/data/__init__.py @@ -1,4 +1,24 @@ +"""Initializes and exports the main components of the MRI study and scan management package. + +This package module consolidates and provides easy access to the primary classes involved in managing +and analyzing MRI study and scan data. The classes exported here facilitate the interfacing with MRI +data at both the study and scan levels, supporting detailed data manipulation and analysis. + +Exports: + Study: A class that manages MRI study operations, extending functionalities for detailed study data handling. + Scan: A class representing individual MRI scans, capable of detailed scan data analysis and management. + ScanInfo: A class for managing basic information and warnings related to MRI scans. + +The `__init__.py` module ensures that these classes are readily accessible when the package is imported, +making the package easier to use and integrate into larger projects or applications. + +Example: + from brkraw.api.data import Study, Scan, ScanInfo + +This enables straightforward access to these classes for further development and deployment in MRI data analysis tasks. +""" + from .study import Study from .scan import Scan, ScanInfo -__all__ = ['Study', 'Scan', 'ScanInfo'] \ No newline at end of file +__all__ = ['Study', 'Scan', 'ScanInfo'] diff --git a/brkraw/api/data/scan.py b/brkraw/api/data/scan.py index 811be91..1ca05c6 100644 --- a/brkraw/api/data/scan.py +++ b/brkraw/api/data/scan.py @@ -1,92 +1,211 @@ +"""This module provides classes and functions for handling and analyzing photovoltaic objects from MRI scans. + +It is designed to interface with the ParaVision data structures (`PvScan`, `PvReco`, `PvFiles`) +and perform various analytical tasks to assist in the study of MRI scans. + +Classes: + ScanInfo: Handles basic scan information and warning accumulation. 
diff --git a/brkraw/api/data/scan.py b/brkraw/api/data/scan.py
index 811be91..1ca05c6 100644
--- a/brkraw/api/data/scan.py
+++ b/brkraw/api/data/scan.py
@@ -1,92 +1,211 @@
+"""This module provides classes and functions for handling and analyzing ParaVision (Pv) objects from MRI scans.
+
+It is designed to interface with the ParaVision data structures (`PvScan`, `PvReco`, `PvFiles`)
+and perform various analytical tasks to assist in the study of MRI scans.
+
+Classes:
+    ScanInfo: Handles basic scan information and warning accumulation.
+    Scan: Main interface class for working with Pv objects and handling detailed scan analysis,
+        including retrieval of objects from memory and performing affine and data array analysis.
+
+This module is part of the `brkraw` package which aims to provide tools for MRI data manipulation and analysis.
+"""
+
 from __future__ import annotations
-from typing import Optional, Union
 import ctypes
-from ..pvobj import PvScan, PvReco, PvFiles
-from ..pvobj.base import BaseBufferHandler
-from ..analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer
+from brkraw.api.pvobj import PvScan, PvReco, PvFiles
+from brkraw.api.pvobj.base import BaseBufferHandler
+from brkraw.api.analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from typing import Optional, Union
+    from .study import Study
 
 
 class ScanInfo(BaseAnalyzer):
-    def __init__(self):
-        self.warns = []
-
+    """Handles the accumulation of warnings and basic information about MRI scans.
+
+    This class is designed to store general scan information and accumulate any warnings that might arise
+    during the scan processing. It serves as a foundational class for more detailed analysis classes
+    that may require access to accumulated warnings and basic scan metrics.
+
+    Attributes:
+        warns (list): A list that accumulates warning messages related to the scan analysis.
+    """
+    def __init__(self) -> None:
+        """Initializes a new instance of ScanInfo with an empty list for warnings."""
+        self.warns: list[str] = []
+
     @property
-    def num_warns(self):
+    def num_warns(self) -> int:
+        """Counts the number of warnings accumulated during the scan processing.
+
+        Returns:
+            int: The total number of warnings accumulated.
+        """
         return len(self.warns)
 
 
 class Scan(BaseBufferHandler):
-    """The Scan class design to interface with analyzer,
+    """Interface class for working with various Pv objects and handling scan information.
 
-    Args:
-        pvobj (_type_): _description_
+    Attributes:
+        pvobj (Union['PvScan', 'PvReco', 'PvFiles']): The ParaVision object associated with this scan.
+        reco_id (Optional[int]): The reconstruction ID for the scan, defaults to None.
+        study_address (Optional[int]): Memory address of the study object, defaults to None.
+        debug (bool): Flag to enable debug mode, defaults to False.
     """
-    def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], reco_id: Optional[int] = None,
-                 study_address: Optional[int] = None, debug: bool=False):
+    def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'],
+                 reco_id: Optional[int] = None,
+                 study_address: Optional[int] = None,
+                 debug: bool = False) -> None:
+        """Initializes the Scan object with necessary identifiers and addresses.
+
+        Args:
+            pvobj: The ParaVision data object to be used throughout the scan analysis.
+            reco_id: Optional reconstruction identifier.
+            study_address: Optional memory address of the associated study object.
+            debug: Flag indicating whether to run in debug mode.
+        """
         self.reco_id = reco_id
         self._study_address = study_address
         self._pvobj_address = id(pvobj)
         self.is_debug = debug
         self.set_scaninfo()
-
-    def retrieve_pvobj(self):
+
+    def retrieve_pvobj(self) -> Union['PvScan', 'PvReco', 'PvFiles', None]:
+        """Retrieves the pvobj from memory using its stored address.
+
+        Returns:
+            The pvobj if available; otherwise, None.
+        """
         if self._pvobj_address:
-            return ctypes.cast(self._pvobj_address, ctypes.py_object).value
+            return ctypes.cast(self._pvobj_address,
+                               ctypes.py_object).value
+        return None
 
-    def retrieve_study(self):
+    def retrieve_study(self) -> Optional['Study']:
+        """Retrieves the study object from memory using its stored address.
+
+        Returns:
+            The study object if available; otherwise, None.
+        """
         if self._study_address:
-            return ctypes.cast(self._study_address, ctypes.py_object).value
+            return ctypes.cast(self._study_address,
+                               ctypes.py_object).value
+        return None
 
-    def set_scaninfo(self, reco_id:Optional[int] = None):
+    def set_scaninfo(self, reco_id: Optional[int] = None) -> None:
+        """Sets the scan information based on the reconstruction ID.
+
+        Args:
+            reco_id: Optional reconstruction ID to specify which scan information to retrieve and set.
+        """
         reco_id = reco_id or self.reco_id
         self.info = self.get_scaninfo(reco_id)
 
-    def get_scaninfo(self, reco_id:Optional[int] = None, get_analyzer:bool = False):
+    def get_scaninfo(self,
+                     reco_id: Optional[int] = None,
+                     get_analyzer: bool = False) -> Union['ScanInfoAnalyzer', 'ScanInfo']:
+        """Gets the scan information, optionally using an analyzer to enrich the data.
+
+        Args:
+            reco_id: Optional reconstruction ID to specify which scan information to retrieve.
+            get_analyzer: Flag indicating whether to use the ScanInfoAnalyzer for detailed analysis.
+
+        Returns:
+            An instance of ScanInfo or ScanInfoAnalyzer with the relevant scan details.
+        """
         infoobj = ScanInfo()
         pvobj = self.retrieve_pvobj()
-        analysed = ScanInfoAnalyzer(pvobj, reco_id, self.is_debug)
+        analysed = ScanInfoAnalyzer(pvobj=pvobj,  # type: ignore
+                                    reco_id=reco_id,
+                                    debug=self.is_debug)
 
         if get_analyzer:
             return analysed
         for attr_name in dir(analysed):
             if 'info_' in attr_name:
                 attr_vals = getattr(analysed, attr_name)
-                if warns:= attr_vals.pop('warns', None):
+                if warns := attr_vals.pop('warns', None):
                     infoobj.warns.extend(warns)
                 setattr(infoobj, attr_name.replace('info_', ''), attr_vals)
         return infoobj
 
-    def get_affine_analyzer(self, reco_id:Optional[int] = None):
+    def get_affine_analyzer(self,
+                            reco_id: Optional[int] = None) -> 'AffineAnalyzer':
+        """Retrieves the affine analysis object for the specified reconstruction ID.
+
+        Args:
+            reco_id: Optional reconstruction ID to specify which affine analysis to retrieve.
+
+        Returns:
+            An AffineAnalyzer object initialized with the scan information.
+        """
         if reco_id:
-            info = self.get_scaninfo(reco_id)
+            info = self.get_scaninfo(reco_id, get_analyzer=False)
         else:
             info = self.info if hasattr(self, 'info') else self.get_scaninfo(self.reco_id)
-        return AffineAnalyzer(info)
+        return AffineAnalyzer(info)  # type: ignore
 
-    def get_datarray_analyzer(self, reco_id: Optional[int] = None):
+    def get_datarray_analyzer(self,
+                              reco_id: Optional[int] = None) -> 'DataArrayAnalyzer':
+        """Retrieves the data array analyzer for the specified reconstruction ID.
+
+        Args:
+            reco_id: Optional reconstruction ID to specify which data array analysis to perform.
+
+        Returns:
+            A DataArrayAnalyzer object initialized with the scan and file information.
+        """
         reco_id = reco_id or self.reco_id
         pvobj = self.retrieve_pvobj()
-        fileobj = pvobj.get_2dseq(reco_id=reco_id)
+        fileobj = pvobj.get_2dseq(reco_id=reco_id)  # type: ignore
+        self._buffers.append(fileobj)
         info = self.info if hasattr(self, 'info') else self.get_scaninfo(reco_id)
-        return DataArrayAnalyzer(info, fileobj)
+        return DataArrayAnalyzer(info, fileobj)  # type: ignore
 
     @property
-    def avail(self):
+    def avail(self) -> list[int]:
+        """List of available reconstruction IDs for the current pvobj.
+
+        Returns:
+            A list of integers representing the available reconstruction IDs.
+        """
         return self.pvobj.avail
 
     @property
-    def pvobj(self):
-        return self.retrieve_pvobj()
+    def pvobj(self) -> Union['PvScan', 'PvReco', 'PvFiles']:
+        """Retrieves the pvobj from memory.
+
+        Returns:
+            The current bound pvobj.
+        """
+        return self.retrieve_pvobj()  # type: ignore
 
     @property
-    def about_scan(self):
+    def about_scan(self) -> dict:
+        """Provides a dictionary with analyzed results for the scan.
+
+        Returns:
+            A dictionary containing analyzed scan results.
+        """
         return self.info.to_dict()
 
     @property
-    def about_affine(self):
+    def about_affine(self) -> dict:
+        """Provides a dictionary with analyzed results for affine transformations.
+
+        Returns:
+            A dictionary containing analyzed affine results.
+        """
         return self.get_affine_analyzer().to_dict()
 
     @property
-    def about_dataarray(self):
-        return self.get_datarray_analyzer().to_dict()
\ No newline at end of file
+    def about_dataarray(self) -> dict:
+        """Provides a dictionary with analyzed results for the data array.
+
+        Returns:
+            A dictionary containing analyzed data array results.
+        """
+        return self.get_datarray_analyzer().to_dict()
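
Taken together, the Scan accessors above support the retrieval pattern sketched here (a minimal sketch; the scan and reconstruction IDs are illustrative):

    scan = study.get_scan(1, reco_id=1)                    # illustrative IDs
    affine = scan.get_affine_analyzer().get_affine()       # affine corrected for subject type/pose
    data = scan.get_datarray_analyzer().get_dataarray()    # numpy array reshaped in Fortran order
    print(scan.info.num_warns)                             # warnings accumulated during parsing
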
+""" + from __future__ import annotations -from ..pvobj import PvDataset -from .scan import Scan +import os +import yaml +import warnings +from copy import copy from pathlib import Path +from dataclasses import dataclass +from .scan import Scan +from brkraw import config +from brkraw.api.pvobj import PvStudy +from brkraw.api.analyzer.base import BaseAnalyzer +from xnippet.parser import RecipeParser +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Optional + + +@dataclass +class StudyHeader: + header: dict + scans: list + + +@dataclass +class ScanHeader: + scan_id: int + header: dict + recos: list + + +@dataclass +class RecoHeader: + reco_id: int + header: dict + + +class Study(PvStudy, BaseAnalyzer): + """Handles operations related to a specific study, integrating PvStudy and analytical capabilities. + + This class extends the functionalities of PvStudy to include detailed analyses + and operations specific to the study being handled. It integrates with various + data processing and analysis methods defined in the base analyzer. -class Study(PvDataset): - def __init__(self, path: Path): - super().__init__(path) + Attributes: + header (Optional[dict]): Parsed study header information. + """ + _info: StudyHeader + + def __init__(self, path: Path) -> None: + """Initializes the Study object with a specified path. + + Args: + path (Path): The file system path to the study data. + """ + super().__init__(self._resolve(path)) self._parse_header() - def get_scan(self, scan_id, reco_id=None, debug=False): - """ - Get a scan object by scan ID. + def get_scan(self, + scan_id: int, + reco_id: Optional[int] = None, + debug: bool = False) -> 'Scan': + """Retrieves a Scan object for a given scan ID with optional reconstruction ID. + + Args: + scan_id (int): The unique identifier for the scan. + reco_id (Optional[int]): The reconstruction identifier, defaults to None. + debug (bool): Flag to enable debugging outputs, defaults to False. + + Returns: + Scan: The Scan object corresponding to the specified scan_id and reco_id. """ pvscan = super().get_scan(scan_id) - return Scan(pvobj=pvscan, reco_id=reco_id, - study_address=id(self), debug=debug) + return Scan(pvobj=pvscan, + reco_id=reco_id, + study_address=id(self), + debug=debug) - def _parse_header(self): + def _parse_header(self) -> None: + """Parses the header information from the study metadata. + + Extracts the header data based on subject and parameters, setting up the + study header attribute. This method handles cases with different versions + of ParaVision by adjusting the header format accordingly. + """ if not self.contents or 'subject' not in self.contents['files']: self.header = None return subj = self.subject subj_header = getattr(subj, 'header') if subj.is_parameter() else None if title := subj_header['TITLE'] if subj_header else None: - self.header = {k.replace("SUBJECT_",""):v for k, v in subj.parameters.items() if k.startswith("SUBJECT")} + self.header = {k.replace("SUBJECT_", ""): v for k, v in subj.parameters.items() if k.startswith("SUBJECT")} self.header['sw_version'] = title.split(',')[-1].strip() if 'ParaVision' in title else "ParaVision < 6" @property - def avail(self): + def avail(self) -> list: + """List of available scan IDs within the study. + + Returns: + list: A list of integers representing the available scan IDs. 
+        """
         return super().avail
 
-    @property #TODO
-    def info(self):
-        """output all analyzed information"""
-        info = {'header': None,
-                'scans': {}}
-        if header := self.header:
-            info['header'] = header
-        # for scan_id in self.avail:
-        #     scanobj = self.get_scan(scan_id)
-        #     info['scans'][scan_id] = {'protocol_name': scanobj.info.protocol['protocol_name'],
-        #                               'recos': {}}
-        #     for reco_id in scanobj.avail:
-        #         info['scans'][scan_id]['recos'][reco_id] = scanobj.get_info(reco_id).frame_group
-        return info
+    @property
+    def info(self) -> dict:
+        if not hasattr(self, '_info'):
+            self._process_header()
+        if not hasattr(self, '_streamed_info'):
+            self._streamed_info = self._stream_info()
+        return self._streamed_info
+
+    def _stream_info(self):
+        stream = copy(self._info.__dict__)
+        scans = {}
+        for s in self._info.scans:
+            scans[s.scan_id] = s.header
+            recos = {}
+            for r in s.recos:
+                recos[r.reco_id] = r.header
+            if recos:
+                scans[s.scan_id]['recos'] = recos
+        stream['scans'] = scans
+        return stream
+
+    def _process_header(self):
+        """Compiles comprehensive information about the study, including header details and scans.
+
+        Uses external YAML configuration to drive the synthesis of structured information about the study,
+        integrating data from various scans and their respective reconstructions, and stores the
+        result in the `_info` attribute.
+        """
+        spec_path = os.path.join(os.path.dirname(__file__), 'study.yaml')  # TODO
+        with open(spec_path, 'r') as f:
+            spec = yaml.safe_load(f)
+        self._info = StudyHeader(header=RecipeParser(self, copy(spec)['study']).get(),
+                                 scans=[])
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            for scan_id in self.avail:
+                scanobj = self.get_scan(scan_id)
+                scan_spec = copy(spec)['scan']
+                scaninfo_targets = [scanobj.info,
+                                    scanobj.get_scaninfo(get_analyzer=True)]
+                scan_header = ScanHeader(scan_id=scan_id,
+                                         header=RecipeParser(scaninfo_targets, scan_spec).get(),
+                                         recos=[])
+                for reco_id in scanobj.avail:
+                    recoinfo_targets = [scanobj.get_scaninfo(reco_id=reco_id),
+                                        scanobj.get_scaninfo(reco_id=reco_id, get_analyzer=True)]
+                    reco_spec = copy(spec)['reco']
+                    reco_header = RecipeParser(recoinfo_targets, reco_spec).get()
+                    reco_header = RecoHeader(reco_id=reco_id,
+                                             header=reco_header) if reco_header else None
+                    if reco_header:
+                        scan_header.recos.append(reco_header)
+                self._info.scans.append(scan_header)
diff --git a/brkraw/api/data/study.yaml b/brkraw/api/data/study.yaml
new file mode 100644
index 0000000..0dfa198
--- /dev/null
+++ b/brkraw/api/data/study.yaml
@@ -0,0 +1,49 @@
+name: tonifti-studyinfo
+type: recipe
+subtype: studyinfo
+version: 24.5.3
+
+study:
+  date:
+    - header.study_date
+    - header.date
+  dob: header.dbirth
+  id: header.id
+  name: header.name_string
+  operator: study_operator
+  position:
+    - header.study_instrument_position
+    - entry: header.entry
+      position: header.position
+      script: entry.split("_").pop(-1) + "_" + position.split("_").pop(-1)
+  sex:
+    - header.gender
+    - header.sex
+  study_name: header.study_name
+  study_nr: header.study_nr
+  sw_version: header.sw_version
+  type: header.type
+  weight:
+    - header.study_weight
+    - header.weight
+
+scan:
+  dim: image.dim
+  in_plane_shape: image.shape
+  in_plann_resolution: image.resolution
+  method: protocol.scan_method
+  num_cycles: cycle.num_cycles
+  num_slice_packs: slicepack.num_slice_packs
+  num_slices_each_pack: slicepack.num_slices_each_pack
+  ppg: protocol.pulse_program
+  protocol: protocol.protocol_name
+  slice_distances_each_pack: slicepack.slice_distances_each_pack
+  slice_order_scheme: slicepack.slice_order_scheme
+  time_step: cycle.time_step
+
+reco:
+  dim_description:
+    dim_desc: image.dim_desc
+    fg_desc: frame_group.id
+    script: dim_desc + [f.split("_")[-1].lower() for f in fg_desc]
+  type: frame_group.type
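
For orientation: in the recipe above, a plain entry such as header.study_date reads the key 'study_date' from the target's 'header' mapping, a list is a fallback chain tried in order, and a mapping with a 'script' entry first resolves the named fields and then evaluates the expression against them. A minimal sketch of that last case, mirroring the removed Recipe helper below (the new xnippet RecipeParser is assumed to behave similarly; the field values are illustrative, not real dataset values):

    # Resolved sub-entries for the 'position' recipe above (illustrative values)
    fields = {'entry': 'HeadFirst_Supine', 'position': 'Head_Supine'}
    expr = "entry.split('_').pop(-1) + '_' + position.split('_').pop(-1)"
    result = eval(expr, dict(fields))  # the removed helper used exec with bound names; eval is equivalent here
    # result == 'Supine_Supine'
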
diff --git a/brkraw/api/helper/recipe.py b/brkraw/api/helper/recipe.py
deleted file mode 100644
index ae91f5e..0000000
--- a/brkraw/api/helper/recipe.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from __future__ import annotations
-import re
-import warnings
-from collections import OrderedDict
-from typing import TYPE_CHECKING
-from .base import BaseHelper
-if TYPE_CHECKING:
-    from typing import Optional, Dict, List, Any
-    from brkraw.api.analyzer import BaseAnalyzer
-
-class Recipe(BaseHelper):
-    def __init__(self, target: 'BaseAnalyzer', recipe: dict, legacy: bool = False,
-                 startup_scripts:Optional[List[str]] = None):
-        self.target = target
-        self.recipe = recipe
-        self.results = OrderedDict()
-        self.backward_comp = legacy
-        self.startup_scripts = startup_scripts
-        self._parse_recipe()
-
-    def _parse_recipe(self):
-        for key, value in self.recipe.items():
-            self.results[key] = self._eval_value(value)
-
-    def _eval_value(self, value: Any):
-        if isinstance(value, str):
-            value = self._process_str(value)
-        elif isinstance(value, list):
-            value = self._process_list(value)
-        elif isinstance(value, dict):
-            value = self._process_dict(value)
-        return value
-
-    def _legacy_parser(self, param_key: str):
-        for pars in ['acqp', 'method', 'visu_pars']:
-            value = getattr(self.target, pars).get(param_key)
-            if value is not None:
-                return value
-        return param_key
-
-    def _process_str(self, str_obj: str):
-        if self.backward_comp:
-            return self._legacy_parser(str_obj)
-        ptrn = r'(?P<attr>^[a-zA-Z][a-zA-Z0-9_]*)\.(?P<key>[a-zA-Z][a-zA-Z0-9_]*)'
-        if matched := re.match(ptrn, str_obj):
-            attr = getattr(self.target, matched['attr'])
-            return attr.get(matched['key'], None)
-        else:
-            return str_obj
-
-    def _process_list(self, list_obj: List):
-        for c in list_obj:
-            processed = self._eval_value(c)
-            if processed is not None:
-                return processed
-        return None
-
-    def _process_dict(self, dict_obj: Dict):
-        script_cmd = 'Equation' if self.backward_comp else 'script'
-        if script_cmd in dict_obj.keys():
-            return self._process_dict_case_script(dict_obj, script_cmd)
-        elif 'key' in dict_obj.keys():
-            return self._process_dict_case_pick_from_list(dict_obj)
-        else:
-            processed = {}
-            for key, value in dict_obj.items():
-                processed[key] = self._eval_value(value)
-            return processed
-
-    def _process_dict_case_script(self, dict_obj: Dict, script_cmd: List[str]):
-        script = dict_obj.pop(script_cmd)
-        for s in self.startup_scripts:
-            exec(s)
-        for key, value in dict_obj.items():
-            value = self._eval_value(value)
-            if value == None:
-                return None
-            exec(f'global {key}')
-            exec(f'{key} = {value}')
-        exec(f"output = {script}", globals(), locals())
-        return locals()['output']
-
-    def _process_dict_case_pick_from_list(self, dict_obj: Dict):
-        key = dict_obj.pop('key')
-        value = self._process_str(key)
-        if not isinstance(value, list):
-            warnings.warn(f"The value returned from '{key}' is not of type 'list'.", UserWarning)
-            return None
-        if 'where' in dict_obj.keys():
-            hint = self._eval_value(dict_obj.pop('where'))
-            return value.index(hint) if hint in value else None
-        elif 'idx' in dict_obj.keys():
-            idx = self._eval_value(dict_obj.pop('idx'))
-            return value[idx] if idx < len(value) else None
-
-    def get(self):
-        return self.results
\ No newline at end of file
diff --git a/brkraw/api/plugin/__init__.py b/brkraw/api/plugin/__init__.py
deleted file mode 100644
index a43766b..0000000
--- a/brkraw/api/plugin/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .aggregator import Aggregator
-from .plugged import Plugged
-from .preset import Preset
-
-__all__ = ['Aggregator', 'Plugged', 'Preset']
\ No newline at end of file
diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py
index 76c26cf..74bd082 100755
--- a/brkraw/api/pvobj/__init__.py
+++ b/brkraw/api/pvobj/__init__.py
@@ -1,7 +1,23 @@
-from .pvdataset import PvDataset
+"""Initialization for the pvobj module.
+
+This module is a cornerstone for interfacing with raw datasets within the Bruker imaging framework.
+It provides essential classes for parsing raw datasets, managing parameter metadata, and organizing
+data at various levels—from individual scans to comprehensive experimental sessions.
+
+Classes Exposed:
+    PvStudy: Manages data for an entire session, encapsulating all scans and reconstructions.
+    PvScan: Handles data related to individual scans, including raw FIDs, acquisition, and method parameters.
+    PvReco: Manages data related to image reconstructions within a single scan.
+    PvFiles: Provides a flexible container for raw files that may not be systematically organized,
+        allowing users to add any files and utilize full module functionalities if all required files are present.
+    Parameter: Represents parameter metadata for various components within a scan.
+    Parser: Facilitates the parsing of raw dataset information into structured formats.
+"""
+
+from .pvstudy import PvStudy
 from .pvscan import PvScan
 from .pvreco import PvReco
 from .pvfiles import PvFiles
 from .parameters import Parameter, Parser
-__all__ = ['PvDataset', 'PvScan', 'PvReco', 'PvFiles', 'Parameter', 'Parser']
\ No newline at end of file
+__all__ = ['PvStudy', 'PvScan', 'PvReco', 'PvFiles', 'Parameter', 'Parser']
\ No newline at end of file
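
As a quick orientation to the classes exported above, low-level access works roughly as sketched here (a hedged sketch; the path is hypothetical and a complete raw session is assumed):

    from brkraw.api.pvobj import PvStudy

    pvstudy = PvStudy('path/to/bruker_session')   # session directory or zip archive
    pvscan = pvstudy.get_scan(1)                  # illustrative scan ID
    print(pvscan.acqp['ACQ_protocol_name'])       # Parameter object; the key is illustrative
    with pvscan.get_2dseq(reco_id=1) as fileobj:  # buffer for the reconstructed image
        raw = fileobj.read()
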
+""" + from __future__ import annotations import os -import zipfile -from collections import OrderedDict -from collections import defaultdict -from typing import TYPE_CHECKING +from zipfile import ZipFile +from collections import OrderedDict, defaultdict from pathlib import Path from .parameters import Parameter - +from xnippet.formatter import PathFormatter +from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Optional, Union, List - from io import BufferedReader - from zipfile import ZipExtFile + from typing import Optional, List + from .types import PvFileBuffer -class BaseBufferHandler: - _buffers: Union[List[BufferedReader], List[ZipExtFile]] = [] +class BaseBufferHandler(PathFormatter): + """Handles buffer management for file operations, ensuring all file streams are properly managed. + + This class provides context management for file buffers, allowing for easy and safe opening and closing + of file streams. It ensures that all buffers are closed when no longer needed, preventing resource leakage. + + Attributes: + _buffers (Union[List[BufferedReader], List[ZipExtFile]]): A list of file buffer objects. + """ + _buffers: List[PvFileBuffer] = [] def close(self): + """Closes all open file buffers managed by this handler.""" if self._buffers: for b in self._buffers: if not b.closed: b.close() def __enter__(self): + """Enters the runtime context related to this object.""" return self def __exit__(self, exc_type, exc_val, exc_tb): + """Exits the runtime context and closes the file buffers, handling any exceptions.""" self.close() - # Return False to propagate exceptions, if any return False -class BaseMethods: - """ - The `BaseMethods` class provides internal method for PvObjects. +class BaseMethods(BaseBufferHandler): + """Provides utility methods for handling files and directories within PvObjects. - Explanation: - This class contains various methods for handling files and directories, including fetching directory structure, - fetching zip file contents, opening files as file objects or strings, retrieving values associated with keys, and setting configuration options. + This class offers methods to fetch directory structures, handle zip file contents, and open files either + as file objects or as readable strings. It also provides a property to access the contents of directories + and zip files, tailored to the needs of managing Bruker raw datasets. - Args: - **kwargs: Keyword arguments for configuration options. - - Returns: - None + Attributes: + _scan_id (Optional[int]): The identifier for a specific scan, used in file path resolutions. + _reco_id (Optional[int]): The identifier for a specific reconstruction, used in file path resolutions. + _path (Optional[Path]): The base path for file operations. + _rootpath (Optional[Path]): The root path of the dataset, used for resolving relative paths. + _contents (Optional[dict]): A structured dictionary containing directory and file details. """ - _scan_id = None - _reco_id = None - _path = None - _rootpath = None - _contents = None + _scan_id: int = None + _reco_id: int = None + _path: 'Path' = None + _rootpath: 'Path' = None + _contents: 'Path' = None - def isinstance(self, name): + def isinstance(self, name: str): + """Check if the class name matches the provided string. + + This method compares the class name of the current instance with a given string to determine if they match. + + Args: + name (str): The class name to check against the instance's class name. 
+
+        Returns:
+            bool: True if the given name matches the instance's class name, otherwise False.
+        """
         return self.__class__.__name__ == name
 
     @staticmethod
@@ -91,7 +121,7 @@ def _fetch_zip(path: 'Path'):
             - 'files': A list of file names.
             - 'file_indexes': A list of file indexes.
         """
-        with zipfile.ZipFile(path) as zip_file:
+        with ZipFile(path) as zip_file:
             contents = defaultdict(lambda: {'dirs': set(), 'files': [], 'file_indexes': [], 'file_sizes': []})
             for i, item in enumerate(zip_file.infolist()):
                 if not item.is_dir():
@@ -105,7 +135,7 @@ def _fetch_zip(path: 'Path'):
                     contents[dirpath]['dirs'].add(dirname)
         return contents
 
-    def _open_as_fileobject(self, key):
+    def _open_as_fileobject(self, key: str):
         """Opens a file object for the given key.
 
         Args:
@@ -131,7 +161,7 @@ def _open_as_fileobject(self, key):
             raise KeyError(f'Failed to load filename "{key}" from folder "{rel_path}".\n [{", ".join(files)}]')
 
         if file_indexes := self.contents.get('file_indexes'):
-            with zipfile.ZipFile(rootpath) as zf:
+            with ZipFile(rootpath) as zf:
                 idx = file_indexes[files.index(key)]
                 return zf.open(zf.namelist()[idx])
         else:
@@ -139,7 +169,7 @@ def _open_as_fileobject(self, key):
             path = os.path.join(*path_list)
             return open(path, 'rb')
 
-    def _open_as_string(self, key):
+    def _open_as_string(self, key: str):
         """Opens a file as binary, decodes it as UTF-8, and splits it into lines.
 
         Args:
@@ -166,7 +196,7 @@ def __getitem__(self, key):
         """
         return self.__getattr__(key)
 
-    def __getattr__(self, key):
+    def __getattr__(self, key: str):
         """
         Get attribute by name.
 
@@ -195,9 +225,32 @@ def __getattr__(self, key):
 
     @property
     def contents(self):
+        """Access the contents dictionary holding directory and file details.
+
+        This property provides access to a structured dictionary that organizes directory and file information,
+        facilitating file operations across the class methods.
+
+        Returns:
+            dict: The contents dictionary with details about directories and files.
+        """
         return self._contents
 
     def get_fid(self, scan_id:Optional[int] = None):
+        """Retrieve the file object for the 'fid' or 'rawdata.job0' file from the dataset.
+
+        This method attempts to fetch the 'fid' file commonly used in imaging datasets. If 'fid' is not found,
+        it tries 'rawdata.job0'. It uses internal methods to navigate through dataset structures based on provided scan ID.
+
+        Args:
+            scan_id (Optional[int]): The identifier for the scan. Necessary if the class structure requires it to fetch data.
+
+        Returns:
+            BufferedReader: The file object for the 'fid' or 'rawdata.job0'.
+
+        Raises:
+            TypeError: If 'scan_id' is required but not provided.
+            FileNotFoundError: If neither 'fid' nor 'rawdata.job0' files are found in the dataset.
+        """
         try:
             pvobj = self.get_scan(scan_id) if hasattr(self, 'get_scan') else self
         except KeyError:
@@ -210,6 +263,21 @@ def get_fid(self, scan_id:Optional[int] = None):
                                 "Please check the dataset and ensure the file is in the expected location.")
 
     def get_2dseq(self, scan_id:Optional[int] = None, reco_id:Optional[int] = None):
+        """Retrieve the '2dseq' file from the dataset for a specific scan and reconstruction.
+
+        This method navigates through the dataset structure to fetch the '2dseq' file, a common data file in imaging datasets.
+
+        Args:
+            scan_id (Optional[int]): The scan ID to navigate to the correct scan. Required if the dataset structure is hierarchical.
+            reco_id (Optional[int]): The reconstruction ID. Required if multiple reconstructions exist and are not specified.
+
+        Returns:
+            BufferedReader: The file object for the '2dseq'.
+
+        Raises:
+            TypeError: If necessary IDs are not provided.
+            FileNotFoundError: If the '2dseq' file is not found in the dataset.
+        """
         try:
             if scan_id and hasattr(self, 'get_scan'):
                 pvobj = self.get_scan(scan_id).get_reco(reco_id)
@@ -233,7 +301,16 @@ def get_2dseq(self, scan_id:Optional[int] = None, reco_id:Optional[int] = None):
                                 "Please check the dataset and ensure the file is in the expected location.")
 
     @staticmethod
-    def _is_binary(fileobj, bytes=512):
+    def _is_binary(fileobj: PvFileBuffer, bytes: int = 512):
+        """Determine if a file is binary by reading a block of data.
+
+        Args:
+            fileobj (BufferedReader): The file object to check.
+            bytes (int): Number of bytes to read for the check.
+
+        Returns:
+            bool: True if the file contains binary data, otherwise False.
+        """
         block = fileobj.read(bytes)
         fileobj.seek(0)
         return b'\x00' in block
\ No newline at end of file
diff --git a/brkraw/api/pvobj/parameters.py b/brkraw/api/pvobj/parameters.py
index d50eb45..777730e 100644
--- a/brkraw/api/pvobj/parameters.py
+++ b/brkraw/api/pvobj/parameters.py
@@ -1,31 +1,57 @@
+"""Provides functionality for parsing and managing parameter metadata within Paravision datasets.
+
+This module includes the `Parameter` class, which extends the functionalities of a generic `Parser` class.
+It specifically handles the extraction and management of parameter data and header information from strings
+that represent parameter dictionaries in Paravision datasets.
+These capabilities are critical for accessing and manipulating the underlying data in a structured and interpretable format.
+
+Classes:
+    Parameter: A class designed to parse and manage parameter dictionaries, providing access to parameters and headers,
+        processing content data, and setting parameter values based on input data.
+
+Dependencies:
+    re: Regular expression operations for parsing and processing text.
+    numpy: Provides support for large, multi-dimensional arrays and matrices,
+        along with a large collection of high-level mathematical functions to operate on these arrays.
+    OrderedDict: A dictionary subclass that remembers the order in which its contents are added,
+        used for maintaining an ordered set of parameters.
+"""
+
+from __future__ import annotations
 import re
 import numpy as np
 from collections import OrderedDict
 from .parser import Parser, ptrn_comment, PARAMETER, HEADER
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from typing import Optional
+    from typing import List
+    from numpy.typing import NDArray
 
 
 class Parameter:
-    """
-    Paravision Parameter object
+    """Handles the parsing and management of parameter data for Paravision experiments.
 
-    This class extends the Parser class and provides methods to initialize the object with a stringlist of parameter dictionaries, retrieve the parameters and headers, and process the contents of the data.
+    This class extends the Parser class, utilizing its functionalities to interpret a list of string
+    representations of parameter dictionaries, manage parameter and header information, and process the contents of the data.
 
     Args:
-        stringlist: A list of strings containing the parameter dictionaries.
-
-    Examples:
-        >>> stringlist = ["param1", "param2"]
-        >>> parameter = Parameter(stringlist)
+        stringlist (List[str]): A list of strings containing parameter entries.
+        name (str): The name identifying the parser object.
+        scan_id (Optional[int]): The scan ID associated with the parameter data.
+        reco_id (Optional[int]): The reconstruction ID associated with the parameter data.
 
     Attributes:
-        parameters (property): Get the parameters of the data.
-        headers (property): Get the headers of the data.
-
-    Methods:
-        _process_contents: Process the contents of the data based on the given parameters.
-        _set_param: Set the parameters and headers based on the given data.
+        _parameters (OrderedDict): Stores parameter values.
+        _header (OrderedDict): Stores header information.
+        _name (str): Name of the parser object.
+        _repr_items (List[str]): List of string representations for object description.
     """
-    def __init__(self, stringlist, name, scan_id=None, reco_id=None):
+    def __init__(self,
+                 stringlist: List[str],
+                 name: str,
+                 scan_id: Optional[int] = None,
+                 reco_id: Optional[int] = None):
         """
         Initialize the Parameter object with the given stringlist, name, scan_id, and reco_id.
 
@@ -52,52 +78,50 @@ def __init__(self, stringlist, name, scan_id=None, reco_id=None):
 
     @property
     def name(self):
+        """Get a formatted name of the parser object, capitalizing each part separated by underscores.
+
+        Returns:
+            str: A capitalized version of the name attribute.
+        """
         if '_' in self._name:
             return ''.join([s.capitalize() for s in self._name.split('_')])
         return self._name.capitalize()
 
     @property
     def parameters(self):
-        """
-        Get the parameters of the data.
+        """Retrieve the parameters processed by the parser.
 
         Returns:
-            OrderedDict: The parameters of the data.
-
-        Examples:
-            This property can be accessed directly on an instance of the class to retrieve the parameters.
+            OrderedDict: A dictionary containing the parameters of the data.
         """
         return self._parameters
 
     @property
     def header(self):
-        """
-        Get the headers of the data.
+        """Retrieve the headers processed by the parser.
 
         Returns:
-            OrderedDict: The headers of the data.
-
-        Examples:
-            This property can be accessed directly on an instance of the class to retrieve the headers.
+            OrderedDict: A dictionary containing the headers of the data.
         """
         return self._header
 
-    def _process_contents(self, contents, addr, addr_diff, index, value):
-        """
-        Process the contents of the data based on the given parameters.
+    def _process_contents(self,
+                          contents: List[str],
+                          addr: int,
+                          addr_diff: NDArray,
+                          index: int,
+                          value: str):
+        """Process the data contents based on parameter addresses and differences.
 
         Args:
-            contents: The contents of the data.
-            addr: The address of the current parameter.
-            addr_diff: The difference in addresses between parameters.
-            index: The index of the current parameter.
-            value: The value of the current parameter.
+            contents (List[str]): The full list of content strings.
+            addr (int): The current parameter's address in contents.
+            addr_diff (numpy.ndarray): An array of address differences between parameters.
+            index (int): The index of the current parameter.
+            value (str): The initial value of the parameter.
 
         Returns:
-            tuple: A tuple containing the processed data and its shape.
-
-        Examples:
-            This method is intended to be called internally within the class and does not have direct usage examples.
+            tuple: A tuple containing the processed data as a string and its shape or format as int.
""" if addr_diff[index] > 1: c_lines = contents[(addr + 1):(addr + addr_diff[index])] @@ -105,20 +129,19 @@ def _process_contents(self, contents, addr, addr_diff, index, value): return (data, value) if data else (Parser.convert_string_to(value), -1) return Parser.convert_string_to(value), -1 - def _set_param(self, params, param_addr, contents): - """ - Set the parameters and headers based on the given data. + def _set_param(self, + params: List[tuple], + param_addr: List[int], + contents: List[str]): + """Initialize parameters and headers from parsed data. Args: - params: A list of parameter information. - param_addr: The addresses of the parameters. - contents: The contents of the data. + params (List[tuple]): List containing parameter tuples (dtype, key, value). + param_addr (List[int]): List of addresses where parameters are located in the content. + contents (List[str]): The contents as a list of strings from which to extract data. Raises: - ValueError: If an invalid dtype is encountered. - - Examples: - This method is intended to be called internally within the class and does not have direct usage examples. + ValueError: If an invalid data type (dtype) is encountered. """ addr_diff = np.diff(param_addr) self._params_key_struct = params @@ -136,26 +159,74 @@ def _set_param(self, params, param_addr, contents): raise ValueError("Invalid dtype encountered in '_set_param'") def __getitem__(self, key): + """Allows dictionary-like access to parameters. + + Args: + key (str): The key for the desired parameter. + + Returns: + The value associated with the key in the parameters dictionary. + """ return self.parameters[key] def __getattr__(self, key): + """Allows attribute-like access to parameters. + + Args: + key (str): The key for the desired parameter. + + Returns: + The value associated with the key in the parameters dictionary. + """ return self.parameters[key] def __repr__(self): + """Provide a string representation of the Parameter object for debugging and logging. + + Returns: + str: A string representation of the object. + """ return f"{self.name}({', '.join(self._repr_items)})" def keys(self): + """Get the keys of the parameters dictionary. + + Returns: + KeysView: A view of the keys in the parameter dictionary. + """ return self.parameters.keys() def values(self): + """Get the values of the parameters dictionary. + + Returns: + ValuesView: A view of the values in the parameter dictionary. + """ return self.parameters.values() - def get(self, key): + def items(self): + """Get the key and value pairs of the parameters dictionary. + + Returns: + ItemView: A view of the values in the parameter dictionary. + """ + return self.parameters.items() + + def get(self, key: str): + """Get the value of a parameter by key, returning None if the key is not found. + + Args: + key (str): The key for the desired parameter. + + Returns: + The value associated with the key if it exists, otherwise None. + """ if key in self.keys(): return self.parameters[key] else: return None def is_parameter(self): + """True if data successfully loaded""" return True if self.header else False \ No newline at end of file diff --git a/brkraw/api/pvobj/parser.py b/brkraw/api/pvobj/parser.py index 59225b1..40db6bf 100755 --- a/brkraw/api/pvobj/parser.py +++ b/brkraw/api/pvobj/parser.py @@ -1,3 +1,15 @@ +"""Provides parsing utilities for handling and converting parameter data from string representations to structured formats. 
+ +This module includes the `Parser` class, which leverages regular expressions to accurately parse and convert various +data types found in parameter files, such as integers, floats, complex arrays, and strings. +The functionality is designed to support the manipulation and analysis of data from Paravision parameter files, +ensuring data integrity and accessibility. + +Classes: + Parser: A class that offers a comprehensive suite of methods for parsing parameter data, + supporting complex data structures and providing tools to convert and clean data efficiently. +""" + import re import numpy as np from collections import OrderedDict, defaultdict @@ -25,34 +37,31 @@ class Parser: - """ - Parser class for handling parameter dictionaries. + """A utility class for parsing and converting parameter data from string representations. - This class provides methods for loading parameters from a list of strings, converting strings to specific data types, cleaning up array elements, processing complex arrays, parsing shapes, parsing data, parsing array data, and converting data to specified shapes. + The Parser class uses regular expressions to identify and convert data types found in parameter files. It handles typical data formats including integers, floats, strings, and complex arrays, making them amenable for further processing and analysis. Methods: - load_param: JCAMP DX parser that loads parameters from a list of strings. - convert_string_to: Converts a string to a specific data type if it matches certain patterns. - clean_up_elements_in_array: Cleans up array elements by replacing patterns with repeated values. - process_bisarray: Determines the case of an array with BIS prefix by converting each element to a specific data type. - process_complexarray: Process a complex array and return a parsed dictionary. - process_string: Process a string and return the parsed data based on its shape. - parse_shape: Parse the shape of the data. - parse_data: Parse the data based on its format. - parse_array_data: Parse the array data. - convert_data_to: Convert the given data to the specified shape. + load_param(stringlist): Parses parameters from a list of strings, identifying headers and parameters. + convert_string_to(string): Converts strings to appropriate data types based on their content. + clean_up_elements_in_array(data): Cleans array elements by handling patterns and replacing them with repeated values. + process_complexarray(data): Converts complex nested array strings into structured dictionary formats. + parse_shape(shape): Interprets textual shape descriptions into tuple or list formats. + parse_data(data): Converts string data into lists or single values depending on the structure. + convert_data_to(data, shape): Transforms data into the specified shape or data type. """ @staticmethod def load_param(stringlist): - """JCAMP DX parser that loads parameters from a list of strings. + """Parses parameters from a list of string representations of a JCAMP DX file. + + Each string is inspected for key-value pairs that represent parameters or headers. + This method categorizes and stores them accordingly. Args: - stringlist (list): A list of strings containing parameter information. + stringlist (list[str]): A list of strings, each containing a line from a JCAMP DX file. Returns: - params (OrderedDict): An ordered dictionary containing the parsed parameters, where the key is the line number and the value is a tuple of the parameter type, key, and value. 
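Since `load_param` is the entry point for JCAMP-DX parsing, a rough sketch of the categorization it describes may help. The regex patterns and helper below are simplified assumptions, not the module's actual `ptrn_*` definitions:

```python
import re
from collections import OrderedDict

# Simplified stand-ins: '##$key=value' lines are parameters,
# '##key=value' lines are headers (the real patterns live in parser.py).
ptrn_param = re.compile(r'^##\$(?P<key>[^=]+)=(?P<value>.*)$')
ptrn_header = re.compile(r'^##(?P<key>[^$=][^=]*)=(?P<value>.*)$')

def load_param_sketch(stringlist):
    """Categorize JCAMP-DX lines into parameters and headers (simplified)."""
    params = OrderedDict()
    param_addresses = []
    for lnum, line in enumerate(stringlist):
        if m := ptrn_param.match(line):
            params[lnum] = ('param', m['key'], m['value'])
            param_addresses.append(lnum)
        elif m := ptrn_header.match(line):
            params[lnum] = ('header', m['key'], m['value'])
            param_addresses.append(lnum)
    return params, param_addresses, stringlist

params, addrs, _ = load_param_sketch(
    ['##TITLE=Parameter List', '##$PVM_Matrix=( 2 )', '128 128'])
# addrs == [0, 1]; line 2 holds the array payload for PVM_Matrix
```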
- param_addresses (list): A list of line numbers where parameters were found. - stringlist (list): The original list of strings. + tuple: A tuple containing an OrderedDict of parameters, a list of line numbers where parameters are found, and the original list of strings. """ params = OrderedDict() param_addresses = [] @@ -74,13 +83,13 @@ def load_param(stringlist): @staticmethod def convert_string_to(string): - """Converts a string to a specific data type if it matches certain patterns. + """Converts a string to an integer, float, or string based on its content, using regular expression matching. Args: string (str): The string to be converted. Returns: - float, int, or str or None: The converted value of the string, or None if the string is empty. + int, float, str, or None: The converted value of the string, or None if the string is empty. """ string = string.strip() if re.match(ptrn_string, string): @@ -137,19 +146,13 @@ def process_bisarray(elements, shape): @staticmethod def process_complexarray(data): - """ - Process a complex array and return a parsed dictionary. + """Processes a string representation of a complex nested array and converts it into a structured dictionary format. Args: - data: The complex array to be processed. + data (str): The complex array string to be processed. Returns: - dict: A dictionary containing the parsed data. - - Examples: - >>> data = [1, [2, 3], [[4, 5], [6, 7]]] - >>> process_complexarray(data) - {'level_1': [[1]], 'level_2': [[2, 3]], 'level_3': [[4, 5], [6, 7]]} + dict: A dictionary representing the structured levels of the array, categorized by depth. """ data_holder = copy(data) parser = defaultdict(list) @@ -164,8 +167,7 @@ def process_complexarray(data): @staticmethod def process_string(data, shape): - """ - Process a string and return the parsed data based on its shape. + """Process a string and return the parsed data based on its shape. Args: data: The string to be processed. @@ -173,17 +175,6 @@ def process_string(data, shape): Returns: tuple: A tuple containing the parsed data and an empty string, or the processed string. - - Examples: - >>> data = "[1, 2, 3]" - >>> shape = "(3,)" - >>> process_string(data, shape) - ([1, 2, 3], '') - - >>> data = "Hello, World!" - >>> shape = "" - >>> process_string(data, shape) - 'Hello, World!' """ shape = Parser.parse_shape(shape) if elements := re.findall(ptrn_bisstring, data): @@ -201,8 +192,7 @@ def process_string(data, shape): @staticmethod def parse_shape(shape): - """ - Parse the shape of the data. + """Parse the shape of the data. Args: shape: The shape of the data. @@ -212,23 +202,6 @@ def parse_shape(shape): Raises: ValueError: If the shape is invalid. - - Examples: - >>> shape = "(3, 4)" - >>> parse_shape(shape) - '3, 4' - - >>> shape = "3, 4" - >>> parse_shape(shape) - '3, 4' - - >>> shape = "(3, 4, 5)" - >>> parse_shape(shape) - '3, 4, 5' - - >>> shape = "(3, 4,)" - >>> parse_shape(shape) - ValueError: Invalid shape: (3, 4,) """ if shape != -1: shape = re.sub(ptrn_array, r'\g', shape) @@ -238,31 +211,13 @@ def parse_shape(shape): @staticmethod def parse_data(data): - """ - Parse the data based on its format. + """Parse the data based on its format. Args: data: The data to be parsed. Returns: list or str: The parsed data. - - Examples: - >>> data = "[1, 2, 3]" - >>> parse_data(data) - [1, 2, 3] - - >>> data = "1, 2, 3" - >>> parse_data(data) - [1, 2, 3] - - >>> data = "1 2 3" - >>> parse_data(data) - [1, 2, 3] - - >>> data = "Hello, World!" - >>> parse_data(data) - 'Hello, World!' 
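`convert_string_to` anchors all of this type coercion, so a hedged sketch of the idea follows; the patterns here are simplified, while the real method relies on the module's precompiled `ptrn_*` regexes:

```python
import re

def convert_string_to_sketch(string):
    """Coerce a stripped string to int or float when it looks numeric."""
    string = string.strip()
    if not string:
        return None
    if re.fullmatch(r'[+-]?\d+', string):
        return int(string)
    if re.fullmatch(r'[+-]?(\d+\.\d*|\.\d+)([eE][+-]?\d+)?', string):
        return float(string)
    return string

assert convert_string_to_sketch('42') == 42
assert convert_string_to_sketch('3.14') == 3.14
assert convert_string_to_sketch('<1H>') == '<1H>'
assert convert_string_to_sketch('   ') is None
```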
""" if matched := re.findall(ptrn_array, data): return Parser.parse_array_data(matched) @@ -274,17 +229,13 @@ def parse_data(data): @staticmethod def parse_array_data(matched): - """ - Parse the array data. + """Parse the array data. Args: matched: A list of strings representing the matched array data. Returns: list: The parsed array data. - - Examples: - This method is intended to be called internally within the class and does not have direct usage examples. """ if any(',' in cell for cell in matched): return [[Parser.convert_string_to(c) for c in cell.split(',')] for cell in matched] @@ -292,8 +243,7 @@ def parse_array_data(matched): @staticmethod def convert_data_to(data, shape): - """ - Convert the given data to the specified shape. + """Convert the given data to the specified shape. Args: data: The data to be converted. @@ -301,9 +251,6 @@ def convert_data_to(data, shape): Returns: object: The converted data. - - Examples: - This method is intended to be called internally within the class and does not have direct usage examples. """ if isinstance(data, str): data, shape = Parser.process_string(data, shape) diff --git a/brkraw/api/pvobj/pvdataset.py b/brkraw/api/pvobj/pvdataset.py deleted file mode 100755 index 13b3c9e..0000000 --- a/brkraw/api/pvobj/pvdataset.py +++ /dev/null @@ -1,191 +0,0 @@ -import re -import zipfile -from collections import OrderedDict -from pathlib import Path -from .base import BaseMethods -from .pvscan import PvScan - - -class PvDataset(BaseMethods): - """ - A class representing a PvDataset object. - - Inherits from BaseMethods. - - Attributes: - is_compressed (bool): Indicates if the dataset is compressed. - - Methods: - get_scan(scan_id): Get a specific scan object by ID. - - Properties: - path (str): The path of the object. - avail (list): A list of available scans. - contents (dict): A dictionary of pvdataset contents. - """ - def __init__(self, path: Path, debug: bool=False): - """ - Initialize the object with the given path and optional debug flag. - - Args: - path: The path to initialize the object with. - debug: A flag indicating whether debug mode is enabled. - **kwargs: Additional keyword arguments. - - Raises: - Any exceptions raised by _check_dataset_validity or _construct methods. - - Notes: - If 'pvdataset' is present in kwargs, it will be used to initialize the object via super(). - - Examples: - obj = ClassName(path='/path/to/dataset', debug=True) - """ - - if not debug: - self._check_dataset_validity(path) - self._construct() - - # internal method - def _check_dataset_validity(self, path: Path): - """ - Checks the validity of a given dataset path. - - Note: This method only checks the validity of the dataset to be fetched using `fetch_dir` and `fetch_zip`, - and does not check the validity of a `PvDataset`. - - Args: - path (str): The path to check. - - Raises: - FileNotFoundError: If the path does not exist. - ValueError: If the path is not a directory or a file, or if it does not meet the required criteria. 
- - Returns: - None - """ - path = Path(path) if isinstance(path, str) else path - self._path: Path = path.absolute() - if not self._path.exists(): - raise FileNotFoundError(f"The path '{self._path}' does not exist.") - if self._path.is_dir(): - self._contents = self._fetch_dir(self._path) - self.is_compressed = False - elif self._path.is_file() and zipfile.is_zipfile(self._path): - self._contents = self._fetch_zip(self._path) - self.is_compressed = True - else: - raise ValueError(f"The path '{self._path}' does not meet the required criteria.") - - def _construct(self): - """ - Constructs the object by organizing the contents. - - This method constructs the object by organizing the contents based on the provided directory structure. - It iterates over the sorted contents and updates the `_scans` and `_backup` dictionaries accordingly. - After processing, it removes the processed paths from the `_contents` dictionary. - - Args: - **kwargs: keyword argument for datatype specification. - - Returns: - None - """ - self._scans = OrderedDict() - self._backup = OrderedDict() - - to_remove = [] - for path, contents in self._contents.items(): - if not path: - self._root = contents - to_remove.append(path) - elif not contents['files']: - to_remove.append(path) - elif matched := re.match(r'(?:.*/)?(\d+)/(\D+)/(\d+)$', path) or re.match(r'(?:.*/)?(\d+)$', path): - to_remove.append(self._process_childobj(matched, (path, contents))) - self._clear_contents(to_remove) - - def _process_childobj(self, matched, item): - """ - The `_process_childobj` method processes a child object based on the provided arguments and updates the internal state of the object. - - Args: - matched: A `re.Match` object representing the matched pattern. - item: A tuple containing the path and contents of the child object. - **kwargs: Additional keyword arguments. - - Returns: - str: The path of the processed child object. - - Raises: - None. - - Examples: - # Example usage of _process_childobj - matched = re.match(pattern, input_string) - item = ('path/to/child', {'dirs': set(), 'files': [], 'file_indexes': []}) - result = obj._process_childobj(matched, item, pvscan={'binary_files': [], 'parameter_files': ['method', 'acqp', 'visu_pars']}) - """ - path, contents = item - scan_id = int(matched.group(1)) - if scan_id not in self._scans: - self._scans[scan_id] = PvScan(scan_id, (self.path, path)) - if len(matched.groups()) == 1 and 'pdata' in contents['dirs']: - self._scans[scan_id].update(contents) - elif len(matched.groups()) == 3 and matched.group(2) == 'pdata': - reco_id = int(matched.group(3)) - self._scans[scan_id].set_reco(path, reco_id, contents) - else: - self._backup[path] = contents - return path - - @property - def contents(self): - for _, contents in super().contents.items(): - if 'subject' in contents['files']: - return contents - - def _clear_contents(self, to_be_removed): - for path in to_be_removed: - try: - del self._contents[path] - except KeyError: - self._dummy.append(path) - - @property - def path(self): - """ - Gets the path of the object. - - Returns: - str: The path of the object. - """ - return self._path - - @property - def avail(self): - """ - A property representing the available scans. - - Returns: - list: A list of available scans. - """ - return sorted(list(self._scans)) - - def get_scan(self, scan_id): - """ - Get a specific scan object by ID. - - Args: - scan_id (int): The ID of the scan object to retrieve. - - Returns: - object: The specified scan object. 
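With `pvdataset.py` removed, downstream imports need the renamed class from `pvstudy.py`. A hedged before/after sketch; the paths are hypothetical and only the rename itself is taken from this diff:

```python
# before (0.3.x)
# from brkraw.api.pvobj.pvdataset import PvDataset
# study = PvDataset('/path/to/dataset')

# after (0.4.0)
from brkraw.api.pvobj.pvstudy import PvStudy

study = PvStudy('/path/to/dataset.PvDatasets')
for scan_id in study.avail:          # sorted scan IDs
    scan = study.get_scan(scan_id)   # returns a PvScan
```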
-
-        Raises:
-            KeyError: If the specified scan ID does not exist.
-        """
-        return self._scans[scan_id]
-
-    def __dir__(self):
-        return super().__dir__() + ['path', 'avail', 'get_scan']
diff --git a/brkraw/api/pvobj/pvfiles.py b/brkraw/api/pvobj/pvfiles.py
index 931cb5b..6d519d5 100644
--- a/brkraw/api/pvobj/pvfiles.py
+++ b/brkraw/api/pvobj/pvfiles.py
@@ -1,49 +1,93 @@
-import os
+"""Provides the PvFiles class for managing individual files within a Paravision dataset.
+
+This module includes the PvFiles class, derived from BaseMethods, specifically tailored to manage non-standard or loosely organized files within a dataset. It offers functionality for dynamically handling arbitrary file inputs, making it versatile for datasets that do not conform to the standard directory structures typically expected in Paravision studies.
+
+Classes:
+    PvFiles: Manages individual file access and operations, providing methods to handle arbitrary files efficiently and effectively. This class is especially useful for datasets that require flexible file management strategies.
+"""
+
+from __future__ import annotations
 from .base import BaseMethods
-from pathlib import Path
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+    from pathlib import Path


 class PvFiles(BaseMethods):
+    """Manages arbitrary files within a Paravision dataset, providing flexible file handling capabilities.
+
+    This class extends BaseMethods to provide specialized handling of files that may not necessarily fit into
+    a structured directory or standardized dataset format. It is particularly useful for datasets where files
+    are spread across different locations or need to be accessed without a fixed directory structure.
+
+    Attributes:
+        _path (list): A list of resolved file paths that are currently managed by this instance.
+        _contents (dict): A dictionary representing the contents currently available in this instance.
+    """
    def __init__(self, *files: Path):
-        """_summary_
+        """Initializes the PvFiles object with one or more files.

        Args:
-            data_path (str): path of '2dseq' file in reco_dir
-            pars_path (str): path of 'visu_pars' file in reco_dir
+            *files (Path): An arbitrary number of Path objects pointing to the files to be managed.
        """
        self.update(*files)

    def update(self, *files: Path):
-        self._path = [os.path.abspath(f) for f in files if os.path.exists(f)]
-        self._contents = {"files": [os.path.basename(f) for f in self._path],
+        """Updates the managed files in the PvFiles instance.
+
+        Args:
+            *files (Path): An arbitrary number of Path objects pointing to the files to be managed.
+
+        Notes:
+            This method updates the list of file paths and the contents dictionary based on the files provided.
+        """
+        self._path = [self._resolve(f) for f in files if self._resolve(f).exists()]
+        self._contents = {"files": [f.name for f in self._path],
                          "dirs": [],
                          "file_indexes": []}

-    def _open_as_fileobject(self, key):
-        """Override open_as_fileobject method
+    def _open_as_fileobject(self, key: str):
+        """Opens a file as a file object based on the specified key.

        Args:
-            key: The key to identify the file.
+            key (str): The key or part of the file name to identify the file to open.

        Returns:
-            file object: The opened file object.
+            file object: The opened file object corresponding to the key.

        Raises:
-            ValueError: If the key does not exist in the files.
+            KeyError: If the file corresponding to the key does not exist in the managed files.
""" if file_path := self._search_file_path(key): return open(file_path, 'rb') raise KeyError(f'Failed to find filename "{key}" from input files.\n [{self.contents.get("files")}]') - def _search_file_path(self, key): + def _search_file_path(self, key: str): + """Searches for a file path that includes the specified key. + + Args: + key (str): A substring of the file name to search for among the managed files. + + Returns: + str or False: The full path of the file if found, False otherwise. + """ if files := [f for f in self._path if key in f]: return files.pop() else: return False def get_visu_pars(self, _:None=None): - """ Mock function of PvScan """ + """A mock function to mimic getting 'visu_pars', typically used for testing or compatibility. + + Returns: + str: The contents of 'visu_pars' if it exists, mimics behavior of similar functions in related classes. + """ return getattr(self, 'visu_pars') @property def path(self): + """Returns the paths of the managed files. + + Returns: + list: A list of file paths being managed by this instance. + """ return self._path diff --git a/brkraw/api/pvobj/pvreco.py b/brkraw/api/pvobj/pvreco.py index 9440801..0656506 100644 --- a/brkraw/api/pvobj/pvreco.py +++ b/brkraw/api/pvobj/pvreco.py @@ -1,56 +1,77 @@ +"""Module providing the PvReco class, a component of Paravision Objects. + +The PvReco class is designed to manage individual reconstructions within a scan from Paravision datasets. +It extends the BaseMethods class to incorporate more specific functionalities such as managing compressed data formats and +directly handling the file paths and contents of reconstruction data. +The class is particularly geared towards handling the details at the reconstruction level, enabling detailed management and +access to specific types of imaging data. It includes functionalities to initialize reconstructions, update their contents, +and provide access paths, ensuring that data can be accessed and manipulated efficiently and effectively. + +Classes: + PvReco: Manages the data and processes related to individual reconstructions within a Paravision scan, providing tools + to handle and organize the specific data associated with those reconstructions. +""" + +from __future__ import annotations import os import warnings from .base import BaseMethods +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Tuple, Dict + from typing import Optional + from pathlib import Path class PvReco(BaseMethods): - """ - A class representing a PvReco object. + """Manages the reconstruction-specific data within a scan in a Paravision study. - Inherits from BaseMethods. - - Attributes: - is_compressed (bool): Indicates if the dataset is compressed. + This class extends `BaseMethods` to provide specialized handling of the data associated with a particular + reconstruction. It supports both compressed and uncompressed data formats and provides utilities to manage + and access reconstruction-specific details. + Attributes: + is_compressed (bool): Indicates whether the dataset is compressed, affecting how files are accessed and processed. + path (str): The file system path to the reconstruction's data. + scan_id (int): Identifier for the scan associated with this reconstruction. + reco_id (int): Identifier for this specific reconstruction. + Args: scan_id (int): The ID of the scan. reco_id (int): The ID of the reconstruction. - pathes (tuple): A tuple containing the root path and the path. - contents (list): A list of contents. - - Properties: - path (str): The path. 
+ pathes (Tuple[Path, Path]): Contains the root path and specific reconstruction path. + contents (Optional[Dict], optional): Initial content data for the reconstruction. """ - def __init__(self, scan_id, reco_id, pathes, contents): - """ - Initialize a Dataset object. + def __init__(self, scan_id: int, reco_id: int, pathes: Tuple['Path', 'Path'], + contents: Optional['Dict']=None): + """Initializes the PvReco object with specified identifiers, paths, and optional contents. Args: - scan_id (int): The ID of the scan. - reco_id (int): The ID of the reconstruction. - pathes (tuple): A tuple containing the root path and the path. - contents (list): A list of contents. - - Attributes: - _scan_id (int): The ID of the scan. - _reco_id (int): The ID of the reconstruction. - _rootpath (str): The root path. - _path (str): The path. - _contents (list): The list of contents. + scan_id (int): The identifier of the scan to which this reconstruction belongs. + reco_id (int): The unique identifier for this reconstruction within its scan. + pathes (Tuple[Path, Path]): A tuple containing the root path and the specific path for this reconstruction. + contents (Dict, optional): A dictionary representing the initial contents of the reconstruction. + + Raises: + FileNotFoundError: If the provided paths do not exist or are not accessible. + ValueError: If the paths provided do not lead to expected data formats or locations. """ self._scan_id = scan_id self._reco_id = reco_id - self._rootpath, self._path = pathes + self._rootpath = self._resolve(pathes[0]) + self._path = self._resolve(pathes[1]) self._contents = contents self.is_compressed = True if contents.get('file_indexes') else False @property def path(self): - """ - A property representing the path. + """Constructs and returns the full filesystem path for this reconstruction. + + If the reconstruction data is compressed, this returns a tuple of paths; otherwise, + it combines them into a single filesystem path. Returns: - str: The path. + Union[Tuple[Path, Path], str]: The full path or paths to the reconstruction data. """ path = (self._rootpath, self._path) if self.is_compressed: @@ -58,5 +79,15 @@ def path(self): return os.path.join(*path) def get_fid(self): + """Issues a warning that the 'get_fid' method is not supported for PvReco objects. + + This method is typically used at the scan or study level, not at the reconstruction level. + + Returns: + None + + Raises: + Warning: Always warns that the method is not applicable for PvReco objects. + """ warnings.warn(f'{self.__class__} does not support get_fid method. use Scan- or Study-level object instead') return None \ No newline at end of file diff --git a/brkraw/api/pvobj/pvscan.py b/brkraw/api/pvobj/pvscan.py index 72e0aa3..00588f9 100644 --- a/brkraw/api/pvobj/pvscan.py +++ b/brkraw/api/pvobj/pvscan.py @@ -1,65 +1,68 @@ +"""Provides the PvScan class for managing individual scan data within a Paravision study. + +This module includes the PvScan class, derived from BaseMethods, to manage and interact with individual +scans and their respective reconstructions. It handles the organization, retrieval, and processing of scan-specific information, +supporting both compressed and uncompressed data formats. + +Classes: + PvScan: Manages a single scan's dataset, organizing reconstructions and handling specific data retrieval efficiently. 
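The compressed/uncompressed split that `PvReco.path` implements can be shown in isolation. A small sketch of the same rule (the function name is mine):

```python
import os

def reco_path(rootpath, relpath, is_compressed):
    """Compressed data keeps (archive, member) as a pair for zipfile access;
    uncompressed data joins into a single filesystem path."""
    pair = (rootpath, relpath)
    return pair if is_compressed else os.path.join(*pair)

assert reco_path('/data/study', '3/pdata/1', False) == '/data/study/3/pdata/1'
assert reco_path('/data/study.zip', '3/pdata/1', True) == ('/data/study.zip', '3/pdata/1')
```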
+""" + from __future__ import annotations import os from collections import OrderedDict -from typing import Optional, Tuple, Dict, TYPE_CHECKING from .base import BaseMethods from .pvreco import PvReco +from typing import TYPE_CHECKING if TYPE_CHECKING: + from typing import Optional, Tuple, Dict from pathlib import Path class PvScan(BaseMethods): - """ - A class representing a PvScan object. + """Represents and manages an individual scan within a Paravision study dataset. - Inherits from BaseMethods. + Inherits from BaseMethods to utilize general methods for file handling and dataset validation. + Manages the data associated with a single scan, including various reconstructions, both compressed and uncompressed. Attributes: - is_compressed (bool): Indicates if the dataset is compressed. + is_compressed (bool): Indicates whether the scan's dataset is compressed, affecting how files are accessed and processed. + path (str): The file system path to the scan's dataset. + avail (list): A list of IDs representing the available reconstructions within the scan. + contents (dict): A structured dictionary representing the organized contents of the scan. Methods: - update(contents): Update the contents of the dataset. - set_reco(path, reco_id, contents): Set a reco object with the specified path, ID, and contents. - get_reco(reco_id): Get a specific reco object by ID. - - Properties: - path (str): The path. - avail (list): A list of available items. - contents (dict): A dictionary of pvscan contents. + update(contents): Updates the contents of the scan with new data. + set_reco(path, reco_id, contents): Initializes a PvReco object for a specific reconstruction. + get_reco(reco_id): Retrieves a PvReco object for a given reconstruction ID. """ def __init__(self, scan_id: Optional[int], pathes: Tuple[Path, Path], contents: Optional[Dict]=None, recos: Optional[OrderedDict]=None): - """ - Initialize a Dataset object. + """Initializes a PvScan object with the specified scan ID, paths, and optional contents and reconstructions. Args: scan_id (int): The ID of the scan. - pathes (tuple): A tuple containing the root path and the path. - contents (dict, optional): The initial contents of the dataset. Defaults to None. - recos (dict, optional): A dictionary of reco objects. Defaults to None. - - Attributes: - _scan_id (int): The ID of the scan. - _rootpath (str): The root path. - _path (str): The path. - _recos (OrderedDict): An ordered dictionary of reco objects. - - Methods: - update(contents): Update the contents of the dataset. + pathes (tuple): A tuple containing the root path and the specific scan path. + contents (dict, optional): The initial contents of the scan's dataset. Defaults to None. + recos (OrderedDict, optional): A dictionary of PvReco objects. Defaults to None. + + Raises: + FileNotFoundError: If the paths do not exist or are invalid. + ValueError: If the paths are neither directories nor recognizable compressed file formats. """ self._scan_id = scan_id - self._rootpath, self._path = pathes + self._rootpath = self._resolve(pathes[0]) + self._path = self._resolve(pathes[1]) self.update(contents) self._recos = OrderedDict(recos) if recos else OrderedDict() def update(self, contents: Dict): - """ - Update the contents of the dataset. + """pdates the contents of the scan's dataset. Args: - contents (dict): The new contents of the dataset. + contents (dict): The new contents to update the dataset with. 
Returns: None @@ -69,13 +72,12 @@ def update(self, contents: Dict): self._contents = contents def set_reco(self, path: Path, reco_id: int, contents: Dict): - """ - Set a reco object with the specified path, ID, and contents. + """Initializes and stores a PvReco object for a specific reconstruction within the scan. Args: - path (str): The path of the reco object. - reco_id (int): The ID of the reco object. - contents (list): The contents of the reco object. + path (Path): The path to the reconstruction data. + reco_id (int): The unique identifier for the reconstruction. + contents (Dict): The data associated with the reconstruction. Returns: None @@ -83,21 +85,38 @@ def set_reco(self, path: Path, reco_id: int, contents: Dict): self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents) def get_reco(self, reco_id: int): - """ - Get a specific reco object by ID. + """Retrieves the PvReco object associated with the specified reconstruction ID. Args: - reco_id (int): The ID of the reco object to retrieve. + reco_id (int): The ID of the reconstruction to retrieve. Returns: - object: The specified reco object. + PvReco: The reconstruction object. Raises: - KeyError: If the specified reco ID does not exist. + KeyError: If the specified reconstruction ID does not exist within the scan. """ return self._recos[reco_id] def get_visu_pars(self, reco_id: Optional[int] = None): + """Retrieves visualization parameters ('visu_pars') for the scan or a specific reconstruction. + + This method attempts to find and return the 'visu_pars' file. It looks for this file in the following order: + 1. In a specific reconstruction, if `reco_id` is provided. + 2. Directly within the scan's own contents, if available. + 3. In the first available reconstruction that contains 'visu_pars'. + + Args: + reco_id (Optional[int]): The ID of the reconstruction from which to retrieve 'visu_pars'. If None, + the method searches across the scan and all its reconstructions. + + Returns: + The visualization parameters as specified in 'visu_pars'. + + Raises: + FileNotFoundError: If 'visu_pars' cannot be found in the specified reconstruction, within the scan, + or across any of the available reconstructions. + """ if reco_id: return getattr(self.get_reco(reco_id), 'visu_pars') elif 'visu_pars' in self.contents['files']: @@ -111,11 +130,10 @@ def get_visu_pars(self, reco_id: Optional[int] = None): @property def path(self): - """ - A property representing the path. + """Provides the combined filesystem path of the scan's dataset. Returns: - str: The path. + str: The full path combining the root and specific scan path. """ path = (self._rootpath, self._path) if self.is_compressed: @@ -124,10 +142,9 @@ def path(self): @property def avail(self): - """ - A property representing the available items. + """Provides a list of available reconstruction IDs within the scan. Returns: - list: A list of available items. + list: A sorted list of available reconstruction IDs. """ return sorted(list(self._recos)) \ No newline at end of file diff --git a/brkraw/api/pvobj/pvstudy.py b/brkraw/api/pvobj/pvstudy.py new file mode 100755 index 0000000..fd69bc8 --- /dev/null +++ b/brkraw/api/pvobj/pvstudy.py @@ -0,0 +1,193 @@ +"""Provides the PvStudy class, which serves as a comprehensive handler for entire Paravision study datasets. + +This module includes the PvStudy class, derived from BaseMethods, to manage and interact with datasets that may +include multiple scans and various data types, both compressed and uncompressed. 
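The three-step fallback documented for `get_visu_pars` reads more clearly as standalone logic. A sketch assuming only the `avail`/`get_reco`/`contents` interfaces shown in this file:

```python
def find_visu_pars(scan, reco_id=None):
    """Mirror PvScan.get_visu_pars: explicit reco first, then the scan
    itself, then the first reconstruction that carries 'visu_pars'."""
    if reco_id:
        return getattr(scan.get_reco(reco_id), 'visu_pars')
    if 'visu_pars' in scan.contents['files']:
        return getattr(scan, 'visu_pars')
    for rid in scan.avail:
        reco = scan.get_reco(rid)
        if 'visu_pars' in reco.contents['files']:
            return getattr(reco, 'visu_pars')
    raise FileNotFoundError("No 'visu_pars' found in the scan or its recos.")
```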
It facilitates the organization, +retrieval, and processing of study-specific information and individual scans, enhancing the handling of complex +imaging data. + +Classes: + PvStudy: Manages an entire study's dataset, organizing scans and handling specific data retrieval efficiently. +""" + +from __future__ import annotations +import re +import zipfile +from collections import OrderedDict +from .base import BaseMethods +from .pvscan import PvScan +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from pathlib import Path + + +class PvStudy(BaseMethods): + """Represents and manages an entire Paravision study dataset. + + Inherits from BaseMethods to utilize general methods for file handling and dataset validation. + Manages multiple scans and their respective data, supporting both compressed and uncompressed formats. + + Attributes: + is_compressed (bool): Indicates whether the dataset is compressed, affecting how files are accessed and processed. + path (str): The file system path to the study dataset. + avail (list): A list of IDs representing the available scans within the dataset. + contents (dict): A structured dictionary representing the organized contents of the dataset. + + Methods: + get_scan(scan_id): Retrieves a PvScan object for a given scan ID, facilitating detailed access to specific scans. + """ + def __init__(self, path: Path, debug: bool=False): + """Initializes a PvStudy object with the specified path and debug settings. + + Args: + path (Path): The filesystem path to the dataset. + debug (bool, optional): If set to True, enables debug mode which may affect logging and error reporting. + + Raises: + FileNotFoundError: If the path does not exist or is invalid. + ValueError: If the path is neither a directory nor a recognizable compressed file format. + """ + if not debug: + self._check_dataset_validity(self._resolve(path)) + self._construct() + + # internal method + def _check_dataset_validity(self, path: Path): + """Validates the provided path to ensure it points to a viable dataset. + + Args: + path (Path): The path to validate. + + Raises: + FileNotFoundError: If the path does not exist. + ValueError: If the path is neither a directory nor a valid compressed file. + """ + self._path = path + if not self._path.exists(): + raise FileNotFoundError(f"The path '{self._path}' does not exist.") + if self._path.is_dir(): + self._contents = self._fetch_dir(self._path) + self.is_compressed = False + elif self._path.is_file() and zipfile.is_zipfile(self._path): + self._contents = self._fetch_zip(self._path) + self.is_compressed = True + else: + raise ValueError(f"The path '{self._path}' does not meet the required criteria.") + + def _construct(self): + """Organizes the dataset contents by parsing directories and files, structuring them for easy access. + + Processes directories to segregate scans and their respective data, handling both uncompressed and compressed datasets. 
+ """ + self._scans = OrderedDict() + self._backup = OrderedDict() + + to_remove = [] + for path, contents in self._contents.items(): + if not path: + self._root = contents + to_remove.append(path) + elif not contents['files']: + to_remove.append(path) + elif matched := re.match(r'(?:.*/)?(\d+)/(\D+)/(\d+)$', path) or re.match(r'(?:.*/)?(\d+)$', path): + to_remove.append(self._process_childobj(matched, (path, contents))) + self._clear_contents(to_remove) + + def _process_childobj(self, matched, item): + """The `_process_childobj` method processes a child object based on the provided arguments and updates the internal state of the object. + + Args: + matched: A `re.Match` object representing the matched pattern. + item: A tuple containing the path and contents of the child object. + **kwargs: Additional keyword arguments. + + Returns: + str: The path of the processed child object. + """ + path, contents = item + scan_id = int(matched.group(1)) + if scan_id not in self._scans: + self._scans[scan_id] = PvScan(scan_id, (self.path, path)) + if len(matched.groups()) == 1 and 'pdata' in contents['dirs']: + self._scans[scan_id].update(contents) + elif len(matched.groups()) == 3 and matched.group(2) == 'pdata': + reco_id = int(matched.group(3)) + self._scans[scan_id].set_reco(path, reco_id, contents) + else: + self._backup[path] = contents + return path + + @property + def contents(self): + """Retrieves the contents of the study that include 'subject' in their files list. + + This property filters the study's dataset contents, returning only those parts of the dataset + where the 'subject' file is present, which is typically critical for study-specific information. + + Returns: + dict: The dictionary of contents that includes 'subject' among its files. + """ + for _, contents in super().contents.items(): + if 'subject' in contents['files']: + return contents + + def _clear_contents(self, to_be_removed): + """Clears specified contents from the dataset's memory structure. + + This method attempts to remove paths listed in `to_be_removed` from the dataset's content dictionary. + If a path cannot be found (i.e., it's already been removed or never existed), it logs the path to `_dummy` + for further debugging or inspection. + + Args: + to_be_removed (list): A list of paths to be removed from the dataset's contents. + + Returns: + None + + Notes: + The `_dummy` list can be used to track removal errors or inconsistencies in the dataset's path management. + """ + for path in to_be_removed: + try: + del self._contents[path] + except KeyError: + self._dummy.append(path) + + @property + def path(self): + """Returns the filesystem path of the study dataset. + + Returns: + str: The path to the dataset. + """ + return self._path + + @property + def avail(self): + """Provides a list of available scan IDs within the dataset. + + Returns: + list: A sorted list of available scan IDs. + """ + return sorted(list(self._scans)) + + def get_scan(self, scan_id: int): + """Retrieves the scan object associated with the specified scan ID. + + Args: + scan_id (int): The unique identifier for the scan. + + Returns: + PvScan: The scan object associated with the given ID. + + Raises: + KeyError: If there is no scan associated with the provided ID. + """ + return self._scans[scan_id] + + def __dir__(self): + """Customizes the directory listing to include specific attributes and methods. + + Returns: + list: A list of attribute names and methods available in this object. 
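The two regular expressions in `_construct` decide whether an entry is a scan directory, a reconstruction directory, or something else; the snippet below just exercises them on made-up paths:

```python
import re

reco_ptrn = re.compile(r'(?:.*/)?(\d+)/(\D+)/(\d+)$')   # '<scan>/pdata/<reco>'
scan_ptrn = re.compile(r'(?:.*/)?(\d+)$')               # bare '<scan>'

for path in ['MyStudy/3', 'MyStudy/3/pdata/1', 'MyStudy/subject']:
    m = reco_ptrn.match(path) or scan_ptrn.match(path)
    print(path, '->', m.groups() if m else 'no match')
# MyStudy/3         -> ('3',)               one group: scan directory
# MyStudy/3/pdata/1 -> ('3', 'pdata', '1')  three groups: reco directory
# MyStudy/subject   -> no match             left as-is in self._contents
```

`_process_childobj` then branches on `len(matched.groups())`, which is why the distinction between one and three capture groups matters.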
+        """
+        return super().__dir__() + ['path', 'avail', 'get_scan']
diff --git a/brkraw/api/pvobj/types.py b/brkraw/api/pvobj/types.py
new file mode 100644
index 0000000..3d2812d
--- /dev/null
+++ b/brkraw/api/pvobj/types.py
@@ -0,0 +1,24 @@
+from io import BufferedReader
+from zipfile import ZipExtFile
+from typing import Type
+from typing import Union
+from .pvscan import PvScan
+from .pvstudy import PvStudy
+from .pvreco import PvReco
+from .pvfiles import PvFiles
+from .parameters import Parameter
+
+
+PvFileBuffer = Type[Union[BufferedReader, ZipExtFile]]
+
+PvStudyType = Type[PvStudy]
+
+PvScanType = Type[PvScan]
+
+PvRecoType = Type[PvReco]
+
+PvFilesType = Type[PvFiles]
+
+ParameterType = Type[Parameter]
+
+PvObjType = Type[Union[PvScan, PvReco, PvFiles]]
\ No newline at end of file
diff --git a/brkraw/app/backup/__init__.py b/brkraw/app/backup/__init__.py
new file mode 100644
index 0000000..57ad8df
--- /dev/null
+++ b/brkraw/app/backup/__init__.py
@@ -0,0 +1,41 @@
+"""Provides all conventional backup functions with backward compatibility, and adds utilities to send files via an FTP
+server and to compress only the files that are needed.
+"""
+
+import argparse
+from brkraw import __version__
+
+def main():
+    """Main entry point of the command-line interface.
+    Provides a list of all available converting modes (including plugins).
+    """
+    parser = argparse.ArgumentParser(prog='brk_tonifti',
+                                     description="BrkRaw command-line interface for NifTi conversion")
+    parser.add_argument("-v", "--version", action='version', version='%(prog)s v{}'.format(__version__))
+
+    subparsers = parser.add_subparsers(title='Sub-commands',
+                                       description='To run this command, you must specify one of the functions listed '
+                                                   'below next to the command. For more information on each function, '
+                                                   'use -h next to the function name to call help document.',
+                                       help='description',
+                                       dest='function',
+                                       metavar='command')
+
+    input_str = "input raw Bruker data"
+    input_dir_str = "input directory that contains multiple raw Bruker data"
+    output_dir_str = "output directory name"
+    output_fnm_str = "output filename"
+    bids_opt = "create a JSON file contains metadata based on BIDS recommendation"
+
+    info = subparsers.add_parser("info", help='Prints out the information of the internal contents in Bruker raw data')
+
+    scan = subparsers.add_parser("scan", help='Convert a single raw Bruker data into NifTi file(s)')
+    study = subparsers.add_parser("study", help="Convert All raw Bruker data located in the input directory")
+    dataset = subparsers.add_parser("dataset", help="Convert All raw Bruker data located in the input directory")
+
+    # info
+    info.add_argument("input", help=input_str, type=str)
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/brkraw/app/backup/cache.py b/brkraw/app/backup/cache.py
new file mode 100644
index 0000000..04cad7d
--- /dev/null
+++ b/brkraw/app/backup/cache.py
@@ -0,0 +1,175 @@
+from brkraw.app.tonifti import StudyToNifti
+
+import os
+import datetime
+
+
+
+class NamedTuple(object):
+    def __init__(self, **kwargs):
+        self.__dict__.update(kwargs)
+
+
+class BackupCache:
+    def __init__(self):
+        self._init_dataset_class()
+
+    def logging(self, message, method):
+        now = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+        self.log_data.append(NamedTuple(datetime=now, method=method, message=message))
+
+    @property
+    def num_raw(self):
+        return len(self.raw_data)
+    #TODO: need to check if there is enough space to perform the backup, as well as handle a crash event
+    #during the backup (the cache updated
even the backup failed) + + @property + def num_arc(self): + return len(self.arc_data) + + def _init_dataset_class(self): + # dataset + self.raw_data = [] + self.arc_data = [] + self.log_data = [] + + def get_rpath_obj(self, path, by_arc=False): + if len(self.raw_data): + if by_arc: + data_pid = [b.data_pid for b in self.arc_data if b.path == path] + if len(data_pid): + rpath_obj = [r for r in self.raw_data if r.data_pid == data_pid[0]] + if len(rpath_obj): + return rpath_obj[0] + else: + return None + else: + return None + else: + rpath_obj = [r for r in self.raw_data if r.path == path] + if len(rpath_obj): + return rpath_obj[0] + else: + return None + else: + return None + + def get_bpath_obj(self, path, by_raw=False): + if len(self.arc_data): + if by_raw: + r = self.get_rpath_obj(path) + if r is None: + return [] + else: + return [b for b in self.arc_data if b.data_pid == r.data_pid] + else: + data_pid = [b for b in self.arc_data if b.path == path][0].data_pid + return [b for b in self.arc_data if b.data_pid == data_pid] + else: + return [] + + def isin(self, path, raw=True): + if raw: + list_data = self.raw_data + else: + list_data = self.arc_data + _history = [d for d in list_data if d.path == path] + if len(_history): + return True + else: + return False + + def set_raw(self, dirname, raw_dir, removed=False): + # rawobj: data_pid, path, garbage, removed, backup + if not removed: + dir_path = os.path.join(raw_dir, dirname) + if not self.isin(dirname, raw=True): # continue if the path is not saved in this cache obj + if os.path.isdir(dir_path): + raw = StudyToNifti(dir_path) + garbage = False if raw.is_pvdataset else True + rawobj = NamedTuple(data_pid=self.num_raw, + path=dirname, + garbage=garbage, + removed=removed, + backup=False) + self.raw_data.append(rawobj) + else: + self.logging('{} is not a valid directory. 
[raw dataset must be a directory]'.format(dir_path), + 'set_raw') + else: + rawobj = NamedTuple(data_pid=self.num_raw, + path=dirname, + garbage=None, + removed=removed, + backup=True) + self.raw_data.append(rawobj) + + def set_arc(self, arc_fname, arc_dir, raw_dir): + # arcobj: data_pid, path, garbage, crashed, issued + arc_path = os.path.join(arc_dir, arc_fname) + + if not self.isin(arc_fname, raw=False): # continue if the path is not saved in this cache obj + issued = False + try: + arc = StudyToNifti(arc_path) + raw_dname = arc.pvobj.path + raw_path = os.path.join(raw_dir, raw_dname) + garbage = False if arc.is_pvdataset else True + crashed = False + except: + self.logging('{} is crashed.'.format(arc_path), + 'set_arc') + arc = None + raw_dname = None + raw_path = None + garbage = True + crashed = True + + if raw_dname != None: + r = self.get_rpath_obj(raw_dname) + else: + r = None + + if r is None: + raw_dname = os.path.splitext(arc_fname)[0] + self.set_raw(raw_dname, raw_dir, removed=True) + r = self.get_rpath_obj(raw_dname) + r.garbage = garbage + if crashed: + issued = True + else: + if arc is None: + issued = True + else: + if not r.removed: + if not r.backup: + pass + else: + raw = StudyToNifti(raw_path) + if raw.num_recos != arc.num_recos: + issued = True + arcobj = NamedTuple(data_pid=r.data_pid, + path=arc_fname, + garbage=garbage, + crashed=crashed, + issued=issued) + if not crashed: + if not issued: + # backup completed data must has no issue + r.backup = True + + self.arc_data.append(arcobj) + + def is_duplicated(self, file_path, by_arc=False): + if by_arc: + b = self.get_bpath_obj(file_path, by_raw=False) + else: + b = self.get_bpath_obj(file_path, by_raw=True) + if len(b) > 1: + return True + else: + return False + + + diff --git a/brkraw/app/backup/handler.py b/brkraw/app/backup/handler.py new file mode 100644 index 0000000..c48722d --- /dev/null +++ b/brkraw/app/backup/handler.py @@ -0,0 +1,477 @@ +import os +from brkraw.app.tonifti import StudyToNifti +from brkraw.api.config.utils.functools import get_dirsize, \ + get_filesize, yes_or_no, print_internal_error, TimeCounter +import sys +import datetime +import tqdm +import pickle +import zipfile +from .cache import BackupCache +import pickle +import getpass + + +_bar_fmt = '{l_bar}{bar:20}{r_bar}{bar:-20b}' +_user = getpass.getuser() +_width = 80 +_line_sep_1 = '-' * _width +_line_sep_2 = '=' * _width +_empty_sep = '' + +class BackupCacheHandler: + def __init__(self, raw_path, backup_path, fname='.brk-backup_cache'): + """ Handler class for backup data + + Args: + raw_path: path for raw dataset + backup_path: path for backup dataset + fname: file name to pickle cache data + """ + self._cache = None + self._rpath = os.path.expanduser(raw_path) + self._apath = os.path.expanduser(backup_path) + self._cache_path = os.path.join(self._apath, fname) + self._load_pickle() + # self._parse_info() + + def _load_pickle(self): + if os.path.exists(self._cache_path): + try: + with open(self._cache_path, 'rb') as cache: + self._cache = pickle.load(cache) + except EOFError: + os.remove(self._cache_path) + self._cache = BackupCache() + else: + self._cache = BackupCache() + self._save_pickle() + + def _save_pickle(self): + with open(self._cache_path, 'wb') as f: + pickle.dump(self._cache, f) + + def logging(self, message, method): + method = 'Handler.{}'.format(method) + self._cache.logging(message, method) + + @property + def is_duplicated(self): + return self._cache.is_duplicated + + @property + def get_rpath_obj(self): + return 
self._cache.get_rpath_obj
+
+    @property
+    def get_bpath_obj(self):
+        return self._cache.get_bpath_obj
+
+    @property
+    def arc_data(self):
+        return self._cache.arc_data
+
+    @property
+    def raw_data(self):
+        return self._cache.raw_data
+
+    @property
+    def scan(self):
+        return self._parse_info
+
+    def _parse_info(self):
+        print('\n-- Parsing metadata from the raw and archived directories --')
+        list_of_raw = sorted([d for d in os.listdir(self._rpath) if
+                              os.path.isdir(os.path.join(self._rpath, d)) and 'import' not in d])
+        list_of_brk = sorted([d for d in os.listdir(self._apath) if
+                              (os.path.isfile(os.path.join(self._apath, d)) and
+                               (d.endswith('zip') or d.endswith('PvDatasets')))])
+
+        # parse dataset
+        print('\nScanning raw datasets and updating cache...')
+        for r in tqdm.tqdm(list_of_raw, bar_format=_bar_fmt):
+            self._cache.set_raw(r, raw_dir=self._rpath)
+        self._save_pickle()
+
+        print('\nScanning archived datasets and updating cache...')
+        for b in tqdm.tqdm(list_of_brk, bar_format=_bar_fmt):
+            self._cache.set_arc(b, arc_dir=self._apath, raw_dir=self._rpath)
+        self._save_pickle()
+
+        # update raw dataset information (the raw dataset cache remains even if the data was removed)
+        print('\nScanning raw dataset cache...')
+        for r in tqdm.tqdm(self.raw_data[:], bar_format=_bar_fmt):
+            if r.path != None:
+                if not os.path.exists(os.path.join(self._rpath, r.path)):
+                    if not r.removed:
+                        r.removed = True
+        self._save_pickle()
+
+        print('\nReviewing the cached information...')
+        for b in tqdm.tqdm(self.arc_data[:], bar_format=_bar_fmt):
+            arc_path = os.path.join(self._apath, b.path)
+            if not os.path.exists(arc_path):  # the backup dataset no longer exists; remove it from the cache
+                self.arc_data.remove(b)
+            else:  # the backup dataset exists; check its status again
+                if b.issued:  # check if the issue has been resolved.
+                    if b.crashed:  # check if the dataset was re-backed up.
+                        if zipfile.is_zipfile(arc_path):
+                            b.crashed = False  # backup success!
+                            b.issued = False if self.is_same_as_raw(b.path) else True
+                            if b.issued:
+                                if b.garbage:
+                                    if StudyToNifti(arc_path).is_pvdataset:
+                                        b.garbage = False
+                        # else: the backup dataset is still crashed.
+                    else:  # the dataset has an issue but is not crashed; check if the issue has been resolved.
+                        b.issued = False if self.is_same_as_raw(b.path) else True
+                    if not b.issued:  # the issue has been resolved
+                        r = self.get_rpath_obj(b.path, by_arc=True)
+                        r.backup = True
+                else:  # no issue with the dataset; make sure the raw entry is marked as backed up.
+ r = self.get_rpath_obj(b.path, by_arc=True) + if not r.backup: + r.backup = True + self._save_pickle() + + def is_same_as_raw(self, filename): + arc = StudyToNifti(os.path.join(self._apath, filename)) + if arc.pvobj.path != None: + raw_path = os.path.join(self._rpath, arc.pvobj.path) + if os.path.exists(raw_path): + raw = StudyToNifti(raw_path) + return arc.num_recos == raw.num_recos + else: + return None + else: + return None + + def get_duplicated(self): + duplicated = dict() + for b in self.arc_data: + if self.is_duplicated(b.path, by_arc=True): + rpath = self.get_rpath_obj(b.path, by_arc=True).path + if rpath in duplicated.keys(): + duplicated[rpath].append(b.path) + else: + duplicated[rpath] = [b.path] + else: + pass + return duplicated + + def get_list_for_backup(self): + return [r for r in self.get_incompleted() if not r.garbage] + + def get_issued(self): + return [b for b in self.arc_data if b.issued] + + def get_crashed(self): + return [b for b in self.arc_data if b.crashed] + + def get_incompleted(self): + return [r for r in self.raw_data if not r.backup] + + def get_completed(self): + return [r for r in self.raw_data if r.backup] + + def get_garbage(self): + return [b for b in self.arc_data if b.garbage] + + @staticmethod + def _gen_header(title, width=_width): + lines = [] + gen_by = 'Generated by {}'.format(_user).rjust(width) + + lines.append(_empty_sep) + lines.append(_line_sep_2) + lines.append(_empty_sep) + lines.append(title.center(width)) + lines.append(gen_by) + lines.append(_line_sep_2) + lines.append(_empty_sep) + return lines + + def _get_backup_status(self): + now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + lines = self._gen_header('Report of the status of archived data [{}]'.format(now)) + list_need_to_be_backup = self.get_list_for_backup()[:] + total_list = len(list_need_to_be_backup) + if len(list_need_to_be_backup): + lines.append('>> The list of raw data need to be archived.') + lines.append('[Note: The list exclude the raw data does not contain any binary file]') + lines.append(_line_sep_1) + lines.append('{}{}'.format('Rawdata Path'.center(_width-10), 'Size'.rjust(10))) + for r in list_need_to_be_backup: + if len(r.path) > _width-10: + path_name = '{}... '.format(r.path[:_width-14]) + else: + path_name = r.path + raw_path = os.path.join(self._rpath, r.path) + dir_size, unit = get_dirsize(raw_path) + if unit == 'B': + dir_size = '{} {}'.format(dir_size, unit).rjust(10) + else: + dir_size = '{0:.2f}{1}'.format(dir_size, unit).rjust(10) + lines.append('{}{}'.format(path_name.ljust(_width-10), dir_size)) + lines.append(_line_sep_1) + lines.append(_empty_sep) + + list_issued = self.get_issued() + total_list += len(list_issued) + if len(list_issued): + lines.append('>> Failed or incompleted archived data.') + lines.append('[Note: The listed data are either crashed or incompleted]') + lines.append(_line_sep_1) + lines.append('{}{}{}'.format('Archived Path'.center(60), + 'Condition'.rjust(10), + 'Size'.rjust(10))) + for b in self.get_issued(): + if len(b.path) > _width-20: + path_name = '{}... 
'.format(b.path[:_width-24]) + else: + path_name = b.path + arc_path = os.path.join(self._apath, b.path) + file_size, unit = get_filesize(arc_path) + if b.crashed: + raw_path = self.get_rpath_obj(b.path, by_arc=True).path + if raw_path is None: + condition = 'Failed' + else: + condition = 'Crashed' + else: + condition = 'Issued' + if unit == 'B': + file_size = '{} {}'.format(file_size, unit).rjust(10) + else: + file_size = '{0:.2f}{1}'.format(file_size, unit).rjust(10) + lines.append('{}{}{}'.format(path_name.ljust(_width-20), + condition.center(10), + file_size)) + lines.append(_line_sep_1) + lines.append(_empty_sep) + + list_duplicated = self.get_duplicated() + total_list += len(list_duplicated) + if len(list_duplicated.keys()): + lines.append('>> List of duplicated archived data.') + lines.append('[Note: The listed raw data has been archived into multiple files]') + lines.append(_line_sep_1) + lines.append('{} {}'.format('Raw Path'.center(int(_width/2)-1), + 'Archived'.center(int(_width/2)-1))) + for rpath, bpaths in list_duplicated.items(): + if rpath is None: + rpath = '-- Removed --' + if len(rpath) > int(_width/2)-1: + rpath = '{}... '.format(rpath[:int(_width/2)-5]) + for i, bpath in enumerate(bpaths): + if len(bpath) > int(_width/2)-1: + bpath = '{}... '.format(bpath[:int(_width/2)-5]) + if i == 0: + lines.append('{}:-{}'.format(rpath.ljust(int(_width/2)-1), + bpath.ljust(int(_width/2)-1))) + else: + lines.append('{} -{}'.format(''.center(int(_width/2)-1), + bpath.ljust(int(_width/2)-1))) + lines.append(_line_sep_1) + lines.append(_empty_sep) + + if total_list == 0: + lines.append(_empty_sep) + lines.append('The status of archived data is up-to-date...'.center(80)) + lines.append(_empty_sep) + lines.append(_line_sep_1) + return '\n'.join(lines) + + def print_status(self, fobj=sys.stdout): + summary = self._get_backup_status() + print(summary, file=fobj) + + def print_completed(self, fobj=sys.stdout): + now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + lines = self._gen_header('List of archived dataset [{}]'.format(now)) + list_of_completed = self.get_completed() + if len(list_of_completed): + lines.append(_line_sep_1) + lines.append('{}{}{}'.format('Rawdata Path'.center(_width - 20), + 'Removed'.rjust(10), + 'Archived'.rjust(10))) + for r in list_of_completed: + if len(r.path) > _width - 20: + path_name = '{}... 
'.format(r.path[:_width - 24]) + else: + path_name = r.path + removed = 'True' if r.removed else 'False' + archived = 'True' if r.backup else 'False' + lines.append('{}{}{}'.format(path_name.ljust(_width - 20), + removed.center(10), + archived.center(10))) + lines.append(_line_sep_1) + lines.append(_empty_sep) + else: + lines.append(_empty_sep) + lines.append('No archived data...'.center(80)) + lines.append(_empty_sep) + lines.append(_line_sep_1) + summary = '\n'.join(lines) + print(summary, file=fobj) + + def clean(self): + print('\n[Warning] The archived data that contains any issue will be deleted by this command ' + 'and it cannot be revert.') + print(' Prior to run this, please update the cache for data status using "review" function.\n') + ans = yes_or_no('Are you sure to continue?') + + if ans: + list_data = dict(issued=self.get_issued()[:], + garbage=self.get_garbage()[:], + crashed=self.get_crashed()[:], + duplicated=self.get_duplicated().copy()) + for label, dset in list_data.items(): + if label == 'duplicated': + print('\nStart removing {} archived data...'.format(label.upper())) + if len(dset.items()): + for raw_dname, arcs in dset.items(): + if raw_dname != None: + raw_path = os.path.join(self._rpath, raw_dname) + if os.path.exists(raw_path): + r_size, r_unit = get_dirsize(raw_path) + r_size = '{0:.2f} {1}'.format(r_size, r_unit) + else: + r_size = 'Removed' + if len(raw_dname) < 60: + raw_dname = '{}...'.format(raw_dname[:56]) + else: + r_size = 'Removed' + raw_dname = 'No name' + print('Raw dataset: [{}] {}'.format(raw_dname.ljust(60), r_size.rjust(10))) + num_dup = len(arcs) + dup_list = [' +-{}'] * num_dup + print('\n'.join(dup_list).format(*arcs)) + for arc_fname in arcs: + path_to_clean = os.path.join(self._apath, arc_fname) + ans_4rm = yes_or_no(' - Are you sure to remove [{}] ?\n '.format(arc_fname)) + if ans_4rm: + try: + os.remove(path_to_clean) + a = self.get_bpath_obj(arc_fname) + if len(a): + self.arc_data.remove(a[0]) + except OSError: + error = NotImplementedError(path_to_clean) + self.logging(error.message, 'clean') + print(' Failed! The file is locked.') + else: + raise NotImplementedError + else: + if len(dset): + print('\nStart removing {} archived data...'.format(label.upper())) + + def ask_to_remove(): + ans_4rm = yes_or_no(' - Are you sure to remove [{}] ?\n '.format(path_to_clean)) + if ans_4rm: + try: + os.remove(path_to_clean) + self.arc_data.remove(a) + except OSError: + error = NotImplementedError(path_to_clean) + self.logging(error.message, 'clean') + print(' Failed! 
The file is locked.')
+                            else:
+                                raise NotImplementedError
+                        for a in dset:
+                            path_to_clean = os.path.join(self._apath, a.path)
+                            if label == 'issued':
+                                if a.garbage or a.crashed:
+                                    pass
+                                else:
+                                    ask_to_remove()
+                            elif label == 'garbage':
+                                if a.crashed:
+                                    pass
+                                else:
+                                    ask_to_remove()
+            self._save_pickle()
+
+    def backup(self, fobj=sys.stdout):
+        list_raws = self.get_list_for_backup()[:]
+        list_issued = self.get_issued()[:]
+        print('\nStarting backup for raw data not listed in the cache...')
+        self.logging('Archiving process starts...', 'backup')
+
+        for i, dlist in enumerate([list_raws, list_issued]):
+            if i == 0:
+                print('\n[step1] Archiving the raw data that has not been archived.')
+                self.logging('Archiving the raw data that has not been archived...', 'backup')
+            elif i == 1:
+                print('\n[step2] Archiving the data whose archived copy has issues.')
+                self.logging('Archiving the raw data that contains any issue...', 'backup')
+
+            for r in tqdm.tqdm(dlist, unit=' dataset(s)', bar_format=_bar_fmt):
+                run_backup = True
+                raw_path = os.path.join(self._rpath, r.path)
+                arc_path = os.path.join(self._apath, '{}.zip'.format(r.path))
+                tmp_path = os.path.join(self._apath, '{}.part'.format(r.path))
+                if os.path.exists(raw_path):
+                    if os.path.exists(tmp_path):
+                        print(' -[{}] is detected and removed...'.format(tmp_path), file=fobj)
+                        os.unlink(tmp_path)
+                    if os.path.exists(arc_path):
+                        if not zipfile.is_zipfile(arc_path):
+                            print(' -[{}] is a crashed file, removing...'.format(arc_path), file=fobj)
+                            os.unlink(arc_path)
+                        else:
+                            arc = StudyToNifti(arc_path)
+                            raw = StudyToNifti(raw_path)
+                            if arc.is_pvdataset:
+                                if arc.num_recos != raw.num_recos:
+                                    print('  - [{}] is mismatching with the corresponding raw data, '
+                                          'removing...'.format(arc_path), file=fobj)
+                                    os.unlink(arc_path)
+                                else:
+                                    run_backup = False
+                            else:
+                                print('  - [{}] is mismatching with the corresponding raw data, '
+                                      'removing...'.format(arc_path), file=fobj)
+                                os.unlink(arc_path)
+                if run_backup:
+                    print('\n :: Compressing [{}]...'.format(raw_path), file=fobj)
+                    # Compressing
+                    timer = TimeCounter()
+                    try:    # exception handling in case compression fails
+                        with zipfile.ZipFile(tmp_path, 'w') as zip:
+                            # count os.walk entries so tqdm can report progress
+                            file_counter = 0
+                            for _ in os.walk(raw_path):
+                                file_counter += 1
+
+                            for i, (root, dirs, files) in tqdm.tqdm(enumerate(os.walk(raw_path)),
+                                                                    bar_format=_bar_fmt,
+                                                                    total=file_counter,
+                                                                    unit=' file(s)'):
+                                splitted_root = root.split(os.sep)
+                                if i == 0:
+                                    root_idx = splitted_root.index(r.path)
+                                for f in files:
+                                    arc_name = os.sep.join(splitted_root[root_idx:] + [f])
+                                    zip.write(os.path.join(root, f), arcname=arc_name)
+                        print('  - [{}] is created.'.format(os.path.basename(arc_path)), file=fobj)
+
+                    except Exception:
+                        print_internal_error(fobj)
+                        error = NotImplementedError(raw_path)
+                        self.logging(str(error), 'backup')
+                        raise error
+
+                    print('  - processed time: {} sec'.format(timer.time()), file=fobj)
+
+                    # Backup validation
+                    if not os.path.exists(tmp_path):    # Check if the file was generated
+                        error = NotImplementedError(raw_path)
+                        self.logging(str(error), 'backup')
+                        raise error
+                    else:
+                        try:
+                            os.rename(tmp_path, arc_path)
+                        except:
+                            print_internal_error(fobj)
+                            raise NotImplementedError
\ No newline at end of file
diff --git a/brkraw/app/tonifti/__init__.py b/brkraw/app/tonifti/__init__.py
index 31b1e38..88bbc6b 100644
--- a/brkraw/app/tonifti/__init__.py
+++ b/brkraw/app/tonifti/__init__.py
@@ -2,75 +2,81 @@
 dependency: bids, plugin
 """
-from brkraw import __version__
-from .base import 
BasePlugin, PvScan, PvReco, PvFiles -from .study import StudyToNifti, ScanToNifti -import argparse +from brkraw import __version__, config +from xnippet.module import ModuleCommander +from brkraw.app.tonifti.plugin import ToNiftiPlugin, PvScan, PvReco, PvFiles +from brkraw.app.tonifti.study import StudyToNifti, ScanToNifti -__all__ = ['BasePlugin', 'StudyToNifti', 'ScanToNifti', 'PvScan', 'PvReco', 'PvFiles'] +tonifti_config = config.config['app']['tonifti'] +# tonifti_presets = config.get_fetcher('preset') -def main(): - """main script allows convert brkraw - provide list function of all available converting mode (including plugin) - """ - parser = argparse.ArgumentParser(prog='brk_tonifti', - description="BrkRaw command-line interface for NifTi conversion") - parser.add_argument("-v", "--version", action='version', version='%(prog)s v{}'.format(__version__)) +__all__ = ['ToNiftiPlugin', 'StudyToNifti', 'ScanToNifti', 'PvScan', 'PvReco', 'PvFiles'] - subparsers = parser.add_subparsers(title='Sub-commands', - description='To run this command, you must specify one of the functions listed' - 'below next to the command. For more information on each function, ' - 'use -h next to the function name to call help document.', - help='description', - dest='function', - metavar='command') +# def main(): +# """main script allows convert brkraw +# provide list function of all available converting mode (including plugin) +# """ +# parser =ArgParser(prog='brkraw-tonifti', +# description="BrkRaw command-line interface for converting to NifTi1 format") +# parser.add_argument("-v", "--version", action='version', version='%(prog)s v{}'.format(__version__)) - input_str = "input raw Bruker data" - input_dir_str = "input directory that contains multiple raw Bruker data" - output_dir_str = "output directory name" - output_fnm_str = "output filename" - bids_opt = "create a JSON file contains metadata based on BIDS recommendation" +# subparsers = parser.add_subparsers(title='Sub-commands', +# description='To run this command, you must specify one of the functions listed' +# 'below next to the command. 
For more information on each function, ' +# 'use -h next to the function name to call help document.', +# help='description', +# dest='function', +# metavar='command') - info = subparsers.add_parser("info", help='Prints out the information of the internal contents in Bruker raw data') - - scan = subparsers.add_parser("scan", help='Convert a single raw Bruker data into NifTi file(s)') - study = subparsers.add_parser("study", help="Convert All raw Bruker data located in the input directory") - dataset = subparsers.add_parser("dataset", help="Convert All raw Bruker data located in the input directory") +# input_str = "input raw Bruker data" +# input_dir_str = "input directory that contains multiple raw Bruker data" +# output_dir_str = "output directory name" +# output_fnm_str = "output filename" +# bids_opt = "create a JSON file contains metadata based on BIDS recommendation" - # info - info.add_argument("input", help=input_str, type=str) +# info = subparsers.add_parser("info", help='Prints out the information of the internal contents in Bruker raw data') +# dataset = subparsers.add_parser("dataset", help="Convert a multiple PvDatasets into NifTi file(s)") +# study = subparsers.add_parser("study", help="Convert a whole Scans in PvDataset into NifTi file(s)") +# scan = subparsers.add_parser("scan", help='Convert a Scan folder in PvDataset into NifTi file(s)') +# reco = subparsers.add_parser("reco", help='Convert a Reco folder in PvDataset into NifTi file(s)') +# files = subparsers.add_parser("dataset", help="Convert All raw Bruker data located in the input directory") +# plugin = - # tonii - scan.add_argument("input", help=input_str, type=str) - scan.add_argument("-b", "--bids", help=bids_opt, action='store_true') - scan.add_argument("-o", "--output", help=output_fnm_str, type=str, default=False) - scan.add_argument("-s", "--scanid", help="Scan ID, option to specify a particular scan to convert.", type=str) - scan.add_argument("-r", "--recoid", help="RECO ID (default=1), " - "option to specify a particular reconstruction id to convert", - type=int, default=1) - scan.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \ - "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) - scan.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ - "the position variable can be defiend as _, " + \ - "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None) - scan.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') - scan.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') - scan.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') - scan.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true', default=True) +# # info +# info.add_argument("input", help=input_str, type=str) - # tonii_all - dataset.add_argument("input", help=input_dir_str, type=str) - dataset.add_argument("-o", "--output", help=output_dir_str, type=str) - dataset.add_argument("-b", "--bids", help=bids_opt, action='store_true') - dataset.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." 
+ \ - "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) - dataset.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ - "the position variable can be defiend as _, " + \ - "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None) - dataset.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') - dataset.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') - dataset.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') - dataset.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true') +# # tonii +# scan.add_argument("input", help=input_str, type=str) +# scan.add_argument("-b", "--bids", help=bids_opt, action='store_true') +# scan.add_argument("-o", "--output", help=output_fnm_str, type=str, default=False) +# scan.add_argument("-s", "--scanid", help="Scan ID, option to specify a particular scan to convert.", type=str) +# scan.add_argument("-r", "--recoid", help="RECO ID (default=1), " +# "option to specify a particular reconstruction id to convert", +# type=int, default=1) +# scan.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \ +# "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) +# scan.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ +# "the position variable can be defiend as _, " + \ +# "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None) +# scan.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') +# scan.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') +# scan.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') +# scan.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true', default=True) + +# # tonii_all +# dataset.add_argument("input", help=input_dir_str, type=str) +# dataset.add_argument("-o", "--output", help=output_dir_str, type=str) +# dataset.add_argument("-b", "--bids", help=bids_opt, action='store_true') +# dataset.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \ +# "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) +# dataset.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ +# "the position variable can be defiend as _, " + \ +# "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. 
Head_Supine)", type=str, default=None) +# dataset.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') +# dataset.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') +# dataset.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') +# dataset.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true') if __name__ == '__main__': - main() \ No newline at end of file + # main() + print(config) \ No newline at end of file diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 064715c..8d72443 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -1,35 +1,26 @@ from __future__ import annotations import warnings import numpy as np -import nibabel as nib -from enum import Enum -from pathlib import Path +from brkraw import config +from nibabel.nifti1 import Nifti1Image from .header import Header from brkraw.api.pvobj.base import BaseBufferHandler -from brkraw.api.pvobj import PvScan, PvReco, PvFiles from brkraw.api.data import Scan +from xnippet.snippet import PlugInSnippet from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Optional, Union - from brkraw.api.plugin import Plugged - - -XYZT_UNITS = \ - dict(EPI=('mm', 'sec')) + from typing import Optional, Union, Literal + from typing import List + from numpy.typing import NDArray + from xnippet.types import XnippetManagerType -class ScaleMode(Enum): - NONE = 0 - APPLY = 1 - HEADER = 2 - - class BaseMethods(BaseBufferHandler): - def set_scale_mode(self, scale_mode:Optional[ScaleMode]=None): - if scale_mode: - self.scale_mode = scale_mode - else: - self.scale_mode = ScaleMode.HEADER + config: XnippetManagerType = config + + def set_scale_mode(self, + scale_mode: Optional[Literal['header', 'apply']] = None): + self.scale_mode = scale_mode or 'header' @staticmethod def get_dataobj(scanobj:'Scan', @@ -48,13 +39,15 @@ def get_dataobj(scanobj:'Scan', return dataobj @staticmethod - def get_affine(scanobj:'Scan', reco_id:Optional[int] = None, - subj_type:Optional[str]=None, subj_position:Optional[str]=None): - return BaseMethods.get_affine_dict(scanobj, reco_id, subj_type, subj_position)['affine'] + def get_affine(scanobj:'Scan', reco_id: Optional[int] = None, + subj_type: Optional[str]=None, + subj_position: Optional[str]=None): + return BaseMethods.get_affine_dict(scanobj, reco_id, + subj_type, subj_position)['affine'] @staticmethod - def get_data_dict(scanobj:'Scan', - reco_id:Optional[int] = None): + def get_data_dict(scanobj: 'Scan', + reco_id: Optional[int] = None): datarray_analyzer = scanobj.get_datarray_analyzer(reco_id) axis_labels = datarray_analyzer.shape_desc dataarray = datarray_analyzer.get_dataarray() @@ -70,8 +63,9 @@ def get_data_dict(scanobj:'Scan', } @staticmethod - def get_affine_dict(scanobj:'Scan', reco_id:Optional[int] = None, - subj_type:Optional[str] = None, subj_position:Optional[str] = None): + def get_affine_dict(scanobj: 'Scan', reco_id: Optional[int] = None, + subj_type: Optional[str] = None, + subj_position: Optional[str] = None): affine_analyzer = scanobj.get_affine_analyzer(reco_id) subj_type = subj_type or affine_analyzer.subj_type subj_position = subj_position or affine_analyzer.subj_position @@ -84,42 +78,108 @@ def get_affine_dict(scanobj:'Scan', reco_id:Optional[int] = None, } @staticmethod - def get_nifti1header(scanobj:'Scan', reco_id:Optional[int] = None, - 
scale_mode:Optional['ScaleMode']=None):
+    def update_nifti1header(scanobj: 'Scan',
+                            nifti1image: 'Nifti1Image',
+                            reco_id: Optional[int] = None,
+                            scale_mode: Optional[Literal['header', 'apply']] = None):
         if reco_id:
             scanobj.set_scaninfo(reco_id)
-        scale_mode = scale_mode or ScaleMode.HEADER
-        return Header(scanobj.info, scale_mode).get()
+        scale_mode = scale_mode or 'header'
+        return Header(scaninfo=scanobj.info, nifti1image=nifti1image, scale_mode=scale_mode).get()
 
     @staticmethod
-    def get_nifti1image(scanobj:'Scan', reco_id:Optional[int] = None,
-                        scale_mode:Optional['ScaleMode']=None,
-                        subj_type:Optional[str] = None, subj_position:Optional[str] = None,
-                        plugin:Optional['Plugged']=None, plugin_kws:dict=None):
-        if plugin and plugin.type == 'tonifti':
-            with plugin(scanobj, **plugin_kws) as p:
-                dataobj = p.get_dataobj(bool(scale_mode))
-                affine = p.get_affine(subj_type=subj_type, subj_position=subj_position)
-                header = p.get_nifti1header()
+    def get_nifti1image(scanobj: 'Scan',
+                        reco_id: Optional[int] = None,
+                        scale_mode: Optional[Literal['header', 'apply']] = None,
+                        subj_type: Optional[str] = None,
+                        subj_position: Optional[str] = None,
+                        plugin: Optional[Union['PlugInSnippet', str]] = None,
+                        plugin_kws: Optional[dict] = None) -> Optional[Union['Nifti1Image', List['Nifti1Image']]]:
+        if plugin:
+            if nifti1image := BaseMethods._bypass_method_via_plugin(scanobj=scanobj,
+                                                                    subj_type=subj_type, subj_position=subj_position,
+                                                                    plugin=plugin, plugin_kws=plugin_kws):
+                return nifti1image
+            else:
+                return None
+        else:
+            scale_mode = scale_mode or 'header'
+            scale_correction = 1 if scale_mode == 'apply' else 0
+            dataobj = BaseMethods.get_dataobj(scanobj=scanobj,
+                                              reco_id=reco_id,
+                                              scale_correction=scale_correction)
+            affine = BaseMethods.get_affine(scanobj=scanobj,
+                                            reco_id=reco_id,
+                                            subj_type=subj_type,
+                                            subj_position=subj_position)
+            return BaseMethods._assemble_nifti1image(scanobj=scanobj,
+                                                     dataobj=dataobj,
+                                                     affine=affine,
+                                                     scale_mode=scale_mode)
+
+    @staticmethod
+    def _bypass_method_via_plugin(scanobj: 'Scan',
+                                  subj_type: Optional[str] = None,
+                                  subj_position: Optional[str] = None,
+                                  plugin: Optional[Union['PlugInSnippet', str]] = None,
+                                  plugin_kws: Optional[dict] = None) -> Optional[Nifti1Image]:
+        if isinstance(plugin, str):
+            plugin = BaseMethods._get_plugin_snippets_by_name(plugin)
+        if isinstance(plugin, PlugInSnippet) and 'brkraw' in plugin._manifest['package']:  # TODO: need a better tool to check version compatibility as well.
+            print(f'++ Installed PlugIn: {plugin}')
+            with plugin.run(pvobj=scanobj.pvobj, **(plugin_kws or {})) as p:
+                nifti1image = p.get_nifti1image(subj_type=subj_type, subj_position=subj_position)
+            return nifti1image
         else:
-            scale_mode = scale_mode or ScaleMode.HEADER
-            dataobj = BaseMethods.get_dataobj(scanobj, reco_id, bool(scale_mode))
-            affine = BaseMethods.get_affine(scanobj, reco_id, subj_type, subj_position)
-            header = BaseMethods.get_nifti1header(scanobj, reco_id, scale_mode)
-        return nib.Nifti1Image(dataobj, affine, header)
+            warnings.warn("Failed: the given plugin is not available. "
+                          "Please install it locally or choose one of the plugins "
+                          f"available on the remote repository -> {[p.name for p in config.avail]}",
+                          UserWarning)
+            return None
 
+    @staticmethod
+    def _get_plugin_snippets_by_name(plugin: str):
+        fetcher = config._fetcher
+        if not fetcher.is_cache:
+            plugin = BaseMethods._filter_snippets_by_name(plugin, fetcher.local)
+        if fetcher.is_cache or not isinstance(plugin, PlugInSnippet):
+            plugin = BaseMethods._filter_snippets_by_name(plugin, fetcher.remote)
+        return plugin
 
-class BasePlugin(Scan, BaseMethods):
-    def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], verbose: bool=False, **kwargs):
-        super().__init__(pvobj, **kwargs)
-        self.verbose = verbose
+    @staticmethod
+    def _filter_snippets_by_name(name: str, snippets: list):
+        if filtered := [s for s in snippets if s.name == name]:
+            return filtered[0]
+        else:
+            return name
+
+    @staticmethod
+    def _assemble_nifti1image(scanobj: 'Scan',
+                              dataobj: NDArray,
+                              affine: NDArray,
+                              scale_mode: Optional[Literal['header', 'apply']] = None):
+        if isinstance(dataobj, list):
+            # multi-dataobj (e.g. msme)
+            niis = BaseMethods._assemble_msme(dataobj, affine)
+            return [BaseMethods.update_nifti1header(nifti1image=nii,
+                                                    scanobj=scanobj,
+                                                    scale_mode=scale_mode) for nii in niis]
+        if isinstance(affine, list):
+            # multi-slicepacks
+            niis = BaseMethods._assemble_ms(dataobj, affine)
+            return niis
+        nii = Nifti1Image(dataobj=dataobj, affine=affine)
+        return BaseMethods.update_nifti1header(nifti1image=nii,
+                                               scanobj=scanobj,
+                                               scale_mode=scale_mode)
+
+    @staticmethod
+    def _assemble_msme(dataobj: NDArray, affine: NDArray):
+        affine = affine if isinstance(affine, list) else [affine for _ in range(len(dataobj))]
+        return [Nifti1Image(dataobj=dobj, affine=affine[i]) for i, dobj in enumerate(dataobj)]
+
+    @staticmethod
+    def _assemble_ms(dataobj: NDArray, affine: NDArray):
+        return [Nifti1Image(dataobj=dataobj[:,:,i,...], affine=aff) for i, aff in enumerate(affine)]
 
-    def close(self):
-        super().close()
-        self.clear_cache()
-
-    def clear_cache(self):
-        for buffer in self._buffers:
-            file_path = Path(buffer.name)
-            if file_path.exists():
-                file_path.unlink()
+    def list_plugin(self):
+        avail_dict = self.config.avail('plugin')
+        return {'local': [s for s in avail_dict['local'] if s.type == 'tonifti'],
+                'remote': [s for s in avail_dict['remote'] if s.type == 'tonifti']}
\ No newline at end of file
diff --git a/brkraw/app/tonifti/header.py b/brkraw/app/tonifti/header.py
index 15ee638..79dfe55 100644
--- a/brkraw/app/tonifti/header.py
+++ b/brkraw/app/tonifti/header.py
@@ -1,18 +1,29 @@
+"""NIfTI1 header builder.
+
+Note: this module is not yet functioning as expected and needs more work.
+"""
+
 from __future__ import annotations
 import warnings
-from nibabel.nifti1 import Nifti1Header
-from typing import TYPE_CHECKING, Union
+from nibabel.nifti1 import Nifti1Image
+from typing import TYPE_CHECKING
 if TYPE_CHECKING:
+    from typing import Optional, Literal
     from brkraw.api.data import ScanInfo
-    from .base import ScaleMode
 
 
 class Header:
-    def __init__(self, scaninfo:'ScanInfo', scale_mode:Union['ScaleMode', int]):
+    info: ScanInfo
+    scale_mode: int
+    nifti1image: 'Nifti1Image'
+
+    def __init__(self,
+                 scaninfo: 'ScanInfo',
+                 nifti1image: 'Nifti1Image',
+                 scale_mode: Optional[Literal['header', 'apply']] = None):
         self.info = scaninfo
-        self.scale_mode = int(scale_mode.value)
-        self.nifti1header = Nifti1Header()
-        self.nifti1header.default_x_flip = False
+        self.scale_mode = 1 if scale_mode == 'header' else 0
+        self.nifti1image = nifti1image
+        self.nifti1image.header.default_x_flip = False
+        self._set_scale_params()
+        self._set_sliceorder()
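+        # pixdim[4] and slice_duration are only populated for multi-cycle
+        # (4D) acquisitions; _set_time_step below is a no-op otherwise.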
self._set_time_step() @@ -39,19 +50,25 @@ def _set_sliceorder(self): "Failed to identify compatible 'slice_code'. " "Please use this header information with care in case slice timing correction is needed." ) - self.nifti1header['slice_code'] = slice_code - + self.nifti1image.header['slice_code'] = slice_code + def _set_time_step(self): + xyzt_unit = {'cycle':('mm', 'sec')} if self.info.cycle['num_cycles'] > 1: - time_step = self.info.cycle['time_step'] - self.nifti1header['pixdim'][4] = time_step + time_step = self.info.cycle['time_step'] / 1000 + self.nifti1image.header['pixdim'][4] = time_step num_slices = self.info.slicepack['num_slices_each_pack'][0] - self.nifti1header['slice_duration'] = time_step / num_slices + self.nifti1image.header['slice_duration'] = time_step / num_slices + self.nifti1image.header.set_xyzt_units(*xyzt_unit['cycle']) def _set_scale_params(self): - if self.scale_mode == 2: - self.nifti1header['scl_slope'] = self.info.dataarray['slope'] - self.nifti1header['scl_inter'] = self.info.dataarray['offset'] + if self.scale_mode: + self.nifti1image.header.set_slope_inter(slope=self.info.dataarray['slope'], + inter=self.info.dataarray['offset']) + self._update_dtype() + + def _update_dtype(self): + self.nifti1image.header.set_data_dtype(self.nifti1image.dataobj.dtype) def get(self): - return self.nifti1header \ No newline at end of file + return self.nifti1image \ No newline at end of file diff --git a/brkraw/app/tonifti/plugin.py b/brkraw/app/tonifti/plugin.py new file mode 100644 index 0000000..191d17a --- /dev/null +++ b/brkraw/app/tonifti/plugin.py @@ -0,0 +1,54 @@ +from __future__ import annotations +from pathlib import Path +from .base import BaseMethods +from brkraw.api.data import Scan +from brkraw.api.pvobj import PvScan, PvReco, PvFiles +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Union + + +class ToNiftiPlugin(Scan, BaseMethods): + """Base class for handling plugin operations, integrating scanning and basic method functionalities. + + This class initializes plugin operations with options for verbose output and integrates functionalities + from the Scan and BaseMethods classes. It provides methods to close the plugin and clear any cached data. + + Args: + pvobj (Union['PvScan', 'PvReco', 'PvFiles']): An object representing the PV (ParaVision) scan, reconstruction, + or file data, which is central to initializing the plugin operations. + verbose (bool): Flag to enable verbose output during operations, defaults to False. + **kwargs: Additional keyword arguments that are passed to the superclass. + + Attributes: + verbose (bool): Enables or disables verbose output. + """ + def __init__(self, pvobj: Union['PvScan', 'PvReco', 'PvFiles'], + verbose: bool=False, + skip_dependency_check: bool=False, + **kwargs): + """Initializes the BasePlugin with a PV object, optional verbosity, and other parameters. + + Args: + pvobj (Union['PvScan', 'PvReco', 'PvFiles']): The primary object associated with ParaVision operations. + verbose (bool, optional): If True, enables verbose output. Defaults to False. + **kwargs: Arbitrary keyword arguments passed to the superclass initialization. + """ + super().__init__(pvobj, **kwargs) + self.verbose: bool = verbose + self.skip_dependency_check: bool = skip_dependency_check + + def close(self): + """Closes the plugin and clears any associated caches by invoking the clear_cache method. + """ + super().close() + self.clear_cache() + + def clear_cache(self): + """Clears all cached data associated with the plugin. 
This involves deleting temporary buffer files that were
+        cached during plugin operations.
+        """
+        for buffer in self._buffers:
+            file_path = Path(buffer.name)
+            if file_path.exists():
+                file_path.unlink()
diff --git a/brkraw/app/tonifti/scan.py b/brkraw/app/tonifti/scan.py
index 989e6d4..c029157 100644
--- a/brkraw/app/tonifti/scan.py
+++ b/brkraw/app/tonifti/scan.py
@@ -1,17 +1,21 @@
 from __future__ import annotations
+from collections import OrderedDict
 from pathlib import Path
 from brkraw.api.data import Scan
 from brkraw.api.pvobj import PvScan, PvReco, PvFiles
-from collections import OrderedDict
+from .base import BaseMethods
 from typing import TYPE_CHECKING
-from .base import BaseMethods, ScaleMode
 if TYPE_CHECKING:
-    from typing import Union, Optional
-    from brkraw.api.plugin import Plugged
+    from typing import Union, Optional, Literal
+    from brkraw.api import PlugInSnippet
+    from nibabel.nifti1 import Nifti1Image
 
 
 class ScanToNifti(Scan, BaseMethods):
-    def __init__(self, *paths: Path, scale_mode: Optional[ScaleMode]=None, **kwargs):
+    def __init__(self,
+                 *paths: Path,
+                 scale_mode: Optional[Literal['header', 'apply']] = None,
+                 **kwargs):
         """_summary_
 
         Args:
@@ -22,19 +26,17 @@ def __init__(self, *paths: Path, scale_mode: Optional[ScaleMode]=None, **kwargs)
         if len(paths) == 0:
             super().__init__(**kwargs)
         else:
-
             if len(paths) == 1 and paths[0].is_dir():
                 abspath = paths[0].absolute()
                 if contents := self._is_pvscan(abspath):
                     pvobj = self._construct_pvscan(abspath, contents)
                 elif contents := self._is_pvreco(abspath):
                     pvobj = self._construct_pvreco(abspath, contents)
             else:
                 pvobj = PvFiles(*paths)
-            # self.scanobj = Scan(pvobj=pvobj, reco_id=pvobj._reco_id)
             super().__init__(pvobj=pvobj, reco_id=pvobj._reco_id)
 
-
     @staticmethod
     def _construct_pvscan(path: 'Path', contents: 'OrderedDict') -> 'PvScan':
         ref_paths = (path.parent, path.name)
@@ -76,35 +78,58 @@ def _is_pvreco(path: 'Path') -> Union[bool, 'OrderedDict']:
             return contents
         return False
 
-    def get_affine(self, reco_id:Optional[int]=None,
-                   subj_type:Optional[str]=None, subj_position:Optional[str]=None):
-        return super().get_affine(scanobj=self, reco_id=reco_id,
-                                  subj_type=subj_type, subj_position=subj_position)
+    def get_affine(self, reco_id: Optional[int] = None,
+                   subj_type: Optional[str] = None,
+                   subj_position: Optional[str] = None):
+        return super().get_affine(scanobj=self,
+                                  reco_id=reco_id,
+                                  subj_type=subj_type,
+                                  subj_position=subj_position)
 
-    def get_dataobj(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None):
+    def get_dataobj(self, reco_id: Optional[int] = None,
+                    scale_mode: Optional[Literal['header', 'apply']] = None):
         scale_mode = scale_mode or self.scale_mode
-        scale_correction = False if scale_mode == ScaleMode.HEADER else True
+        scale_correction = scale_mode == 'apply'
         if reco_id:
             self.set_scaninfo(reco_id)
-        return super().get_dataobj(scanobj=self, reco_id=reco_id, scale_correction=scale_correction)
+        return super().get_dataobj(scanobj=self,
+                                   reco_id=reco_id,
+                                   scale_correction=scale_correction)
 
-    def get_data_dict(self, reco_id:Optional[int]=None):
+    def get_data_dict(self, reco_id: Optional[int] = None):
         if reco_id:
             self.set_scaninfo(reco_id)
         return super().get_data_dict(scanobj=self, reco_id=reco_id)
 
-    def get_affine_dict(self, reco_id:Optional[int]=None, subj_type:Optional[str]=None, subj_position:Optional[str]=None):
+    def get_affine_dict(self, reco_id: Optional[int] = None,
+                        subj_type: Optional[str] = None,
+                        subj_position: 
Optional[str] = None):
         if reco_id:
             self.set_scaninfo(reco_id)
-        return super().get_affine_dict(scanobj=self, reco_id=reco_id,
-                                       subj_type=subj_type, subj_position=subj_position)
+        return super().get_affine_dict(scanobj=self,
+                                       reco_id=reco_id,
+                                       subj_type=subj_type,
+                                       subj_position=subj_position)
 
-    def get_nifti1header(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode'] = None):
+    def update_nifti1header(self,
+                            nifti1obj: 'Nifti1Image',
+                            reco_id: Optional[int] = None,
+                            scale_mode: Optional[Literal['header', 'apply']] = None):
         scale_mode = scale_mode or self.scale_mode
-        return super().get_nifti1header(self, reco_id, scale_mode).get()
+        return super().update_nifti1header(self, nifti1obj, reco_id, scale_mode)
 
-    def get_nifti1image(self, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None,
-                        subj_type:Optional[str]=None, subj_position:Optional[str]=None,
-                        plugin:Optional['Plugged']=None, plugin_kws:dict=None):
+    def get_nifti1image(self,
+                        reco_id: Optional[int] = None,
+                        scale_mode: Optional[Literal['header', 'apply']] = None,
+                        subj_type: Optional[str] = None,
+                        subj_position: Optional[str] = None,
+                        plugin: Optional[Union['PlugInSnippet', str]] = None,
+                        plugin_kws: Optional[dict] = None):
         scale_mode = scale_mode or self.scale_mode
-        return super().get_nifti1image(self, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws)
\ No newline at end of file
+        return super().get_nifti1image(self,
+                                       reco_id,
+                                       scale_mode,
+                                       subj_type,
+                                       subj_position,
+                                       plugin,
+                                       plugin_kws)
\ No newline at end of file
diff --git a/brkraw/app/tonifti/study.py b/brkraw/app/tonifti/study.py
index 3c037c3..c310c4e 100644
--- a/brkraw/app/tonifti/study.py
+++ b/brkraw/app/tonifti/study.py
@@ -1,21 +1,26 @@
+"""Study-level interface for converting Bruker PvDatasets into NIfTI1 images."""
 from __future__ import annotations
 from brkraw.api.data import Study
-from .base import BaseMethods, ScaleMode
+from .base import BaseMethods
 from .scan import ScanToNifti
-from typing import TYPE_CHECKING, Optional
-
+from typing import TYPE_CHECKING
 if TYPE_CHECKING:
+    from typing import Optional, Literal, Union
     from pathlib import Path
-    from brkraw.api.plugin import Plugged
+    from brkraw.api import PlugInSnippet
+    from nibabel.nifti1 import Nifti1Image
 
 
 class StudyToNifti(Study, BaseMethods):
-    def __init__(self, path:'Path', scale_mode: Optional['ScaleMode'] = None):
+    """Loader that converts the scans of a PvDataset (study) into NIfTI1 images."""
+    def __init__(self, path:'Path',
+                 scale_mode: Optional[Literal['header', 'apply']] = None):
         super().__init__(path)
         self.set_scale_mode(scale_mode)
         self._cache = {}
 
-    def get_scan(self, scan_id:int, reco_id:Optional[int] = None):
+    def get_scan(self, scan_id: int,
+                 reco_id: Optional[int] = None):
         if scan_id not in self._cache.keys():
             pvscan = super().get_scan(scan_id).retrieve_pvobj()
             self._cache[scan_id] = ScanToNifti(pvobj=pvscan,
@@ -23,43 +28,103 @@ def get_scan(self, scan_id:int, reco_id:Optional[int] = None):
                                                reco_id=reco_id,
                                                study_address=id(self))
         return self._cache[scan_id]
 
-    def get_scan_pvobj(self, scan_id:int, reco_id:Optional[int] = None):
-        return super().get_scan(scan_id).retrieve_pvobj()
+    def get_scan_pvobj(self, scan_id: int,
+                       reco_id: Optional[int] = None):
+        return super().get_scan(scan_id=scan_id,
+                                reco_id=reco_id).retrieve_pvobj()
 
-    def get_scan_analyzer(self, scan_id:int, reco_id:Optional[int]=None):
-        return self.get_scan(scan_id).get_scaninfo(reco_id, get_analyzer=True)
+    def get_scan_analyzer(self,
+                          scan_id: int,
+                          reco_id: Optional[int] = None):
+        return self.get_scan(scan_id).get_scaninfo(reco_id=reco_id,
+                                                   get_analyzer=True)
 
-    def get_affine(self, scan_id:int, reco_id:Optional[int]=None,
-                   subj_type:Optional[str]=None, subj_position:Optional[str]=None):
+    def get_affine(self,
+                   scan_id: int,
+                   reco_id: Optional[int] = None,
+                   subj_type: Optional[str] = None,
+                   subj_position: Optional[str] = None):
         scanobj = self.get_scan(scan_id, reco_id)
-        return super().get_affine(scanobj=scanobj, reco_id=reco_id,
-                                  subj_type=subj_type, subj_position=subj_position)
+        return super().get_affine(scanobj=scanobj,
+                                  reco_id=reco_id,
+                                  subj_type=subj_type,
+                                  subj_position=subj_position)
 
-    def get_dataobj(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None):
+    def get_dataobj(self, scan_id: int, reco_id: Optional[int] = None,
+                    scale_mode: Optional[Literal['header', 'apply']] = None):
         scale_mode = scale_mode or self.scale_mode
-        scale_correction = False if scale_mode == ScaleMode.HEADER else True
+        scale_correction = scale_mode == 'apply'
         scanobj = self.get_scan(scan_id, reco_id)
-        return super().get_dataobj(scanobj=scanobj, reco_id=reco_id, scale_correction=scale_correction)
+        return super().get_dataobj(scanobj=scanobj,
+                                   reco_id=reco_id,
+                                   scale_correction=scale_correction)
 
-    def get_data_dict(self, scan_id:int, reco_id:Optional[int]=None):
+    def get_data_dict(self, scan_id: int,
+                      reco_id: Optional[int] = None):
         scanobj = self.get_scan(scan_id, reco_id)
-        return super().get_data_dict(scanobj=scanobj, reco_id=reco_id)
+        return super().get_data_dict(scanobj=scanobj,
+                                     reco_id=reco_id)
 
-    def get_affine_dict(self, scan_id:int, reco_id:Optional[int]=None,
-                        subj_type:Optional[str]=None, subj_position:Optional[str]=None):
-        scanobj = self.get_scan(scan_id, reco_id)
-        return super().get_affine_dict(scanobj=scanobj, reco_id=reco_id,
-                                       subj_type=subj_type, subj_position=subj_position)
+    def get_affine_dict(self,
+                        scan_id: int,
+                        reco_id: Optional[int] = None,
+                        subj_type: Optional[str] = None,
+                        subj_position: Optional[str] = None):
+        scanobj = self.get_scan(scan_id=scan_id,
+                                reco_id=reco_id)
+        return super().get_affine_dict(scanobj=scanobj,
+                                       reco_id=reco_id,
+                                       subj_type=subj_type,
+                                       subj_position=subj_position)
 
-    def get_nifti1header(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None):
+    def update_nifti1header(self,
+                            nifti1image: 'Nifti1Image',
+                            scan_id: int,
+                            reco_id: Optional[int] = None,
+                            scale_mode: Optional[Literal['header', 'apply']] = None):
         scale_mode = scale_mode or self.scale_mode
-        scanobj = self.get_scan(scan_id, reco_id)
-        return super().get_nifti1header(scanobj, scale_mode).get()
+        scanobj = self.get_scan(scan_id=scan_id,
+                                reco_id=reco_id)
+        return super().update_nifti1header(scanobj=scanobj,
+                                           nifti1image=nifti1image,
+                                           scale_mode=scale_mode)
 
-    def get_nifti1image(self, scan_id:int, reco_id:Optional[int]=None, scale_mode:Optional['ScaleMode']=None,
-                        subj_type:Optional[str]=None, subj_position:Optional[str]=None,
-                        plugin:Optional['Plugged']=None, plugin_kws:dict=None):
+    def get_nifti1image(self,
+                        scan_id: int,
+                        reco_id: Optional[int] = None,
+                        scale_mode: Optional[Literal['header', 'apply']] = None,
+                        subj_type: Optional[str] = None,
+                        subj_position: Optional[str] = None,
+                        plugin: Optional[Union['PlugInSnippet', str]] = None,
+                        plugin_kws: Optional[dict] = None):
         scale_mode = scale_mode or self.scale_mode
-        scanobj = self.get_scan(scan_id, reco_id)
-        return super().get_nifti1image(scanobj, reco_id, scale_mode, subj_type, subj_position, plugin, plugin_kws)
-
\ No newline at end of file
+        scanobj 
= self.get_scan(scan_id=scan_id,
+                                reco_id=reco_id)
+        return super().get_nifti1image(scanobj=scanobj,
+                                       reco_id=reco_id,
+                                       scale_mode=scale_mode,
+                                       subj_type=subj_type,
+                                       subj_position=subj_position,
+                                       plugin=plugin,
+                                       plugin_kws=plugin_kws)
+
+    @property
+    def info(self):
+        # print a human-readable summary of the study header and its scans
+        header = super().info['header']
+        scans = super().info['scans']
+        title = header['sw_version']
+        date = header['date']
+        print(title)
+        print('-' * len(title))
+        print(f'date: {date}')
+        for key, value in header.items():
+            if key not in ['date', 'sw_version']:
+                print(f'{key}:\t{value}')
+        print('\n[ScanID]\tMethod::Protocol')
+        max_size = len(str(max(scans.keys())))
+
+        for scan_id, value in scans.items():
+            print(f"[{str(scan_id).zfill(max_size)}]\t{value['method']}::{value['protocol']}")
+            if 'recos' in value and value['recos']:
+                print('\tRECO:', list(value['recos'].keys()))
\ No newline at end of file
diff --git a/brkraw/app/tonifti/types.py b/brkraw/app/tonifti/types.py
new file mode 100644
index 0000000..2fbaa92
--- /dev/null
+++ b/brkraw/app/tonifti/types.py
@@ -0,0 +1,18 @@
+from typing import Type, Literal, Optional, Union
+from .plugin import ToNiftiPlugin
+from .scan import ScanToNifti
+from .study import StudyToNifti
+
+
+ToNiftiPluginType = Type[ToNiftiPlugin]
+
+ScanToNiftiType = Type[ScanToNifti]
+
+StudyToNiftiType = Type[StudyToNifti]
+
+ToNiftiObject = Type[Union[ToNiftiPlugin, ScanToNifti, StudyToNifti]]
+
+ScaleMode = Optional[Literal['header', 'apply']]
+
+__all__ = ['ToNiftiPlugin', 'ScanToNifti', 'StudyToNifti']
+
diff --git a/brkraw/app/viewer/__init__.py b/brkraw/app/viewer/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/brkraw/app/viewer/config.py b/brkraw/app/viewer/config.py
new file mode 100644
index 0000000..48cafcc
--- /dev/null
+++ b/brkraw/app/viewer/config.py
@@ -0,0 +1,17 @@
+import sys
+if sys.platform == 'darwin':
+    font = 'arial 14'
+    button_size = 10
+else:
+    font = 'arial 10'
+    button_size = 12
+win_pre_width = 250
+win_pst_width = 1050
+win_pre_height = 40
+win_pst_height = 680
+
+window_posx = 100
+window_posy = 100
+
+viewer_width = 400
+viewer_height = 400
\ No newline at end of file
diff --git a/brkraw/app/viewer/main_win.py b/brkraw/app/viewer/main_win.py
new file mode 100644
index 0000000..97b8f66
--- /dev/null
+++ b/brkraw/app/viewer/main_win.py
@@ -0,0 +1,215 @@
+import tkinter as tk
+from tkinter import filedialog
+from brkraw import __version__, load
+from .scan_list import ScanList
+from .scan_info import ScanInfo
+from .subj_info import SubjInfo
+from .previewer import Previewer
+from .config import win_pre_width as _width, win_pre_height as _height
+from .config import win_pst_width, win_pst_height
+from .config import window_posx, window_posy
+
+class MainWindow(tk.Tk):
+    def __init__(self, *args, **kwargs):
+        super(MainWindow, self).__init__(*args, **kwargs)
+        self._raw = None
+        self._ignore_slope = False
+        self._ignore_offset = False
+        self._scan_id = None
+        self._reco_id = None
+        self._output = None
+        self.title('BrkRaw GUI - v{}'.format(__version__))
+
+        # initial window size and location
+        self.geometry('{}x{}+{}+{}'.format(_width, _height,
+                                           window_posx, window_posy))
+        # minimal size
+        self.minsize(_width, _height)
+        self.maxsize(_width, _height)
+
+        self._init_layout()
+
+    def open_filediag(self):
+        self._path = filedialog.askopenfilename(
+            initialdir = ".",
+            title = "Select file",
+            filetypes = (("Zip compressed", "*.zip"),
+                         ("Paravision 6 format", "*.PVdatasets"),
+                         ("Paravision 360 format", "*.PvDatasets")
+ )) + self._extend_layout() + self._load_dataset() + + def open_dirdiag(self): + self._path = filedialog.askdirectory( + initialdir = ".", + title = "Select directory") + self._extend_layout() + self._load_dataset() + + def _init_layout(self): + # level 1 + self._subj_info = SubjInfo(self) + self._subj_info.pack( + side=tk.TOP, fill=tk.X, anchor=tk.CENTER) + + # Button binding + self._subj_info._loadfile.config(command=self.open_filediag) + self._subj_info._loaddir.config(command=self.open_dirdiag) + + def _close(self): + if self._raw != None: + self.geometry('{}x{}+{}+{}'.format(_width, _height, + window_posx, window_posy)) + + # close opened frames + self._subj_info._clean_path() + self._subj_info._main_frame.destroy() + self._subj_info._path.destroy() + self._subj_info._path_label.destroy() + # self._subj_info._close.destroy() + self._subj_info._refresh.destroy() + self._main_frame.destroy() + + self._raw.close() + self._raw = None + + # minimal size + self.minsize(_width, _height) + self.maxsize(_width, _height) + + def _extend_layout(self): + # Change windows size + self._close() + if len(self._path) != 0: + self.geometry('{}x{}+{}+{}'.format(win_pst_width, win_pst_height, + window_posx, window_posy)) + self.minsize(win_pst_width, win_pst_height) + self.maxsize(win_pst_width, win_pst_height) + + # extend level 1 + self._subj_info._extend_layout() + # self._subj_info._close.config(command=self._close) + self._subj_info._refresh.config(command=self._refresh) + + self._main_frame = tk.Frame(self) + self._main_frame.pack( + side=tk.BOTTOM, fill=tk.BOTH, expand=True) + + # level 2 + self._scan_list = ScanList(self._main_frame) + view_frame = tk.Frame(self._main_frame) + self._scan_list.pack( + side=tk.LEFT, fill=tk.BOTH) + view_frame.pack( + side=tk.LEFT, fill=tk.BOTH, expand=True) + + # level 3 + self._scan_info = ScanInfo(view_frame) + self._preview = Previewer(view_frame) + self._preview.pack( + side=tk.LEFT, fill=tk.BOTH, expand=True) + self._scan_info.pack( + side=tk.LEFT, fill=tk.BOTH, padx=10, pady=10) + self._bind_scanlist() + self._set_convert_button() + + def _refresh(self): + self._close() + self._extend_layout() + self._load_dataset() + + def _load_dataset(self): + if len(self._path) != 0: + self._raw = load(self._path) + self._init_update() + + def _init_update(self): + # take first image from dataset + self._scan_id, recos = [v for i, v in enumerate(self._raw._avail.items()) if i == 0][0] + + self._reco_id = recos[0] + # update subject info + self._subj_info.load_data(self._raw) + + # update scan and reco listbox + self._scan_list.load_data(self._raw) + self._scan_list._update_recos(self._raw, self._scan_id) + + # update scan info of first image + self._scan_info.load_data(self._raw, self._scan_id, self._reco_id) + + # update preview of first image + self._preview.load_data(self._raw, self._scan_id, self._reco_id) + + def _bind_scanlist(self): + self._scan_list._scanlist.bind('<>', self._update_scanid) + self._scan_list._recolist.bind('<>', self._update_recoid) + + def _update_scanid(self, event): + w = event.widget + index = int(w.curselection()[0]) + self._scan_id = self._raw._pvobj.avail_scan_id[index] + self._reco_id = self._raw._avail[self._scan_id][0] + self._scan_list._update_recos(self._raw, self._scan_id) + self._update_data() + + def _update_recoid(self, event): + w = event.widget + index = int(w.curselection()[0]) + self._reco_id = self._raw._avail[self._scan_id][index] + self._update_data() + + def _update_data(self): + # update scan info of first image + 
self._scan_info.load_data(self._raw, self._scan_id, self._reco_id) + # update preview of first image + self._preview.load_data(self._raw, self._scan_id, self._reco_id) + + def _set_convert_button(self): + self._scan_list._updt_bt.config(state=tk.NORMAL) + self._scan_list._conv_bt.config(state=tk.NORMAL) + self._scan_list._updt_bt.config(command=self._set_output) + self._scan_list._conv_bt.config(command=self._save_as) + + def _set_output(self): + self._output = filedialog.askdirectory(initialdir=self._output, + title="Select Output Directory") + + def _save_as(self): + date = self._raw.get_scan_time()['date'].strftime("%y%m%d") + pvobj = self._raw._pvobj + acqp = self._raw.get_acqp + this_acqp = acqp(self._scan_id) + scan_name = this_acqp.parameters['ACQ_scan_name'] + scan_name = scan_name.replace(' ','-') + filename = '{}_{}_{}_{}_{}_{}_{}'.format(date, + pvobj.subj_id, + pvobj.session_id, + pvobj.study_id, + self._scan_id, + self._reco_id, + scan_name) + if self._ignore_slope: + slope = None + else: + slope = False + if self._ignore_offset: + offset = None + else: + offset = False + self._raw.save_as(self._scan_id, self._reco_id, filename, + dir=self._output, slope=slope, offset=offset) + method = self._raw._pvobj._method[self._scan_id].parameters['Method'] + import re + if re.search('dti', method, re.IGNORECASE): + self._raw.save_bdata(self._scan_id, filename) + from tkinter import messagebox + messagebox.showinfo(title='File conversion', + message='{}/{}.nii.gz has been converted'.format(self._output, + filename)) + + +if __name__ == '__main__': + root = MainWindow() + root.mainloop() diff --git a/brkraw/app/viewer/previewer.py b/brkraw/app/viewer/previewer.py new file mode 100644 index 0000000..4e42150 --- /dev/null +++ b/brkraw/app/viewer/previewer.py @@ -0,0 +1,225 @@ +import tkinter as tk +from PIL import Image, ImageTk +import numpy as np +from .config import viewer_width, viewer_height + + +class Previewer(tk.Frame): + def __init__(self, *args, **kwargs): + super(Previewer, self).__init__(*args, **kwargs) + # variables + self._dataobj = None + self._imgobj = None + self._is_tripilot = False + self._current_slice = 0 + self._current_frame = 0 + + self.tkimg = None + self.slice_axis = tk.IntVar() + self.slice_axis.set(99) + + self._set_axisbuttons() + self._set_canvas() + self._set_sliders() + + def _set_canvas(self): + self._canvas = tk.Canvas(self, + width=viewer_width, + height=viewer_height) + self._canvas.place(x=50, y=30) + + def _set_axisbuttons(self): + self._axis_buttons = [] + + tk.Label(self, text='Slice Axis::').place(x=50, y=5) + for i, axis in enumerate(['x', 'y', 'z']): + button = tk.Radiobutton(self, + text=axis, + padx=10, + variable=self.slice_axis, + command=self._change_sliceaxis, + value=i) + button.place(x=150 + i*50, y=5) + + if self.slice_axis.get() == 99: + button['state'] = 'disabled' + self._axis_buttons.append(button) + + def _set_sliders(self, n_slice=0, n_frame=0): + + tk.Label(self, text='Slice').place(x=70, y=455) + tk.Label(self, text='Frame').place(x=70, y=495) + self.slice_slider = tk.Scale(self, from_=0, to=n_slice - 1, + orient=tk.HORIZONTAL, + command=self._change_slice, length=300) + + self.frame_slider = tk.Scale(self, from_=0, to=n_frame - 1, + orient=tk.HORIZONTAL, + command=self._change_frame, length=300) + + self.slice_slider.set(self._current_slice) + self.frame_slider.set(self._current_frame) + self.slice_slider.place(x=130, y=440) + self.frame_slider.place(x=130, y=480) + + if n_slice == 0: + self.slice_slider.config(state=tk.DISABLED) 
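+        # load_data() passes n_frame=0 for purely 3D data (no time axis),
+        # so the frame slider stays disabled in that case.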
+ if n_frame == 0: + self.frame_slider.config(state=tk.DISABLED) + + def update_image(self): + self._canvas.create_image((int(viewer_width / 2), int(viewer_height / 2)), + image=self.tkimg) + + def _load_image(self, brkraw_obj, scan_id, reco_id): + from ..lib.utils import multiply_all + # update image when scan_id and reco_id is changed + visu_pars = brkraw_obj._get_visu_pars(scan_id, reco_id) + dataobj = brkraw_obj.get_dataobj(scan_id, reco_id, slope=False) + + if len(dataobj.shape) > 3: + x, y, z = dataobj.shape[:3] + f = multiply_all(dataobj.shape[3:]) + # all converted nifti must be 4D + self._dataobj = dataobj.reshape([x, y, z, f])[:,:,::-1, ...] + else: + self._dataobj = dataobj + + # shape = brkraw_obj._get_matrix_size(visu_pars, dataobj) + # self._dataobj = dataobj.reshape(shape[::-1]).T[:,:,::-1, ...] + + n_slicepacks = brkraw_obj._get_slice_info(visu_pars)['num_slice_packs'] + spatial_info = brkraw_obj._get_spatial_info(visu_pars) + + self._resol = spatial_info['spatial_resol'] + self._matrix_size = spatial_info['matrix_size'] + + if n_slicepacks > 1: + self._is_tripilot = True + else: + self._is_tripilot = False + + def _change_sliceaxis(self): + if self.slice_axis.get() in range(3): + self._imgobj = np.swapaxes(self._dataobj, axis1=self.slice_axis.get(), axis2=2) + shape = self._imgobj.shape + n_slice = shape[2] + + self._current_slice = int(n_slice / 2) + self._current_frame = 0 + + shape = self._imgobj.shape + if len(shape) > 3: + n_frame = shape[3] + else: + n_frame = 0 + n_slice = shape[2] + + self._current_slice = int(n_slice / 2) + self._current_frame = 0 + + self._set_sliders(n_slice, n_frame) + + def _convert_image(self): + if len(self._imgobj.shape) > 3: + img = self._imgobj[:,:,self._current_slice,self._current_frame] + else: + img = self._imgobj[:,:,self._current_slice] + + slice_axis = self.slice_axis.get() + if slice_axis in range(3): + axis_ref = np.array([0, 1, 2]) + axis_ref[slice_axis], axis_ref[2] = axis_ref[2], axis_ref[slice_axis] + + self._img_resol = np.array(self._resol[0])[axis_ref] + self._img_size = np.array(self._matrix_size[0])[axis_ref] + else: + self._img_resol = np.array(self._resol[0]) + self._img_size = np.array(self._matrix_size[0]) + + img_fov = self._img_resol.astype(float) * self._img_size.astype(float) + max_val = img_fov[:2].max() + img_fov /= max_val + img_fov *= 400 + + # check resolution + img_width, img_height = int(img_fov[0]), int(img_fov[1]) + + self.tkimg = self.convert_pil2tk(self.convert_npy2pil(img), + img_width, img_height) + + def _change_slice(self, event): + self._current_slice = self.slice_slider.get() + self._convert_image() + self.update_image() + + def _change_frame(self, event): + self._current_frame = self.frame_slider.get() + self._convert_image() + self.update_image() + + def load_data(self, brkraw_obj, scan_id, reco_id): + # load image from dataset + self._load_image(brkraw_obj, scan_id, reco_id) + shape = self._dataobj.shape + if len(shape) > 3: + n_frame = shape[3] + else: + n_frame = 0 + n_slice = shape[2] + + self._current_slice = int(n_slice/2) + self._current_frame = 0 + + if self._is_tripilot: + self.slice_axis.set(99) + for button in self._axis_buttons: + button['state'] = 'disabled' + else: + for button in self._axis_buttons: + button['state'] = 'normal' + self.slice_axis.set(2) + self._set_sliders(n_slice, n_frame) + self._imgobj = self._dataobj + self._convert_image() + self.update_image() + + @staticmethod + def convert_npy2pil(data, mode=None, rescale=True): + """ convert 2D numpy.array to PIL.Image 
object + + Args: + data: 2D array data + mode: mode of image object + link=https://pillow.readthedocs.io/en/latest/handbook/concepts.html#modes + rescale: rescale value to 0~255 + + Returns: PIL.Image object + + """ + if rescale == True: + rescaled_data = data / data.max() * 255 + else: + rescaled_data = data + rescaled_data = rescaled_data.astype('uint8') + return Image.fromarray(rescaled_data.T, mode=mode) + + @staticmethod + def convert_pil2tk(pilobj, width, height, method='nearest'): + """ convert PIL.Image object to tkinter.PhotoImage object + This will allow plotting image on Tk.Canvas + + Args: + pilobj: 2D image object + width: width of the image + height: height of the image + method: Method for interpolation + + Returns: TkImage object + + """ + if method == 'nearest': + method = Image.NEAREST + else: + method = Image.ANTIALIAS + return ImageTk.PhotoImage(pilobj.resize((width, height), method)) \ No newline at end of file diff --git a/brkraw/app/viewer/scan_info.py b/brkraw/app/viewer/scan_info.py new file mode 100644 index 0000000..3a5bb91 --- /dev/null +++ b/brkraw/app/viewer/scan_info.py @@ -0,0 +1,72 @@ +import tkinter as tk +from .config import font + + +class ScanInfo(tk.Frame): + def __init__(self, *args, **kwargs): + super(ScanInfo, self).__init__(*args, **kwargs) + self.title = tk.Label(self, text='Selected Scan Info') + self.title.pack(side=tk.TOP, fill=tk.X) + self.textbox = tk.Text(self, width=30) + self.textbox.pack(side=tk.TOP, fill=tk.BOTH, expand=True) + self.textbox.configure(font=font) + + def load_data(self, brkraw_obj, scan_id, reco_id): + from brkraw.lib.utils import get_value, is_all_element_same + visu_pars = brkraw_obj._get_visu_pars(scan_id, reco_id) + self.textbox.config(state=tk.NORMAL) + self.textbox.delete('1.0', tk.END) + + # RepetitionTime + tr = get_value(visu_pars, 'VisuAcqRepetitionTime') + tr = ','.join(map(str, tr)) if isinstance(tr, list) else tr + # EchoTime + te = get_value(visu_pars, 'VisuAcqEchoTime') + te = 0 if te is None else te + te = ','.join(map(str, te)) if isinstance(te, list) else te + # PixelBandwidth + pixel_bw = get_value(visu_pars, 'VisuAcqPixelBandwidth') + # FlipAngle + flip_angle = get_value(visu_pars, 'VisuAcqFlipAngle') + # Sequence and Protocol names + sequence_name = get_value(visu_pars, 'VisuAcqSequenceName') + protocol_name = get_value(visu_pars, 'VisuAcquisitionProtocol') + acqpars = brkraw_obj.get_acqp(int(scan_id)) + scan_name = acqpars._parameters['ACQ_scan_name'] + # Dimension + dim = brkraw_obj._get_dim_info(visu_pars)[0] + # MatrixSize + size = brkraw_obj._get_matrix_size(visu_pars) + size = ' x '.join(map(str, size)) + # FOV size and resolution + spatial_info = brkraw_obj._get_spatial_info(visu_pars) + temp_info = brkraw_obj._get_temp_info(visu_pars) + s_resol = spatial_info['spatial_resol'] + fov_size = spatial_info['fov_size'] + fov_size = ' x '.join(map(str, fov_size)) + s_unit = spatial_info['unit'] + t_resol = '{0:.3f}'.format(temp_info['temporal_resol']) + t_unit = temp_info['unit'] + s_resol = list(s_resol[0]) if is_all_element_same(s_resol) else s_resol + s_resol = ' x '.join(['{0:.3f}'.format(r) for r in s_resol]) + # Number of slice packs + n_slicepacks = brkraw_obj._get_slice_info(visu_pars)['num_slice_packs'] + + # Printing out + self.textbox.insert(tk.END, 'Sequence:\n - {}\n'.format(sequence_name)) + self.textbox.insert(tk.END, 'Protocol:\n - {}\n'.format(protocol_name)) + self.textbox.insert(tk.END, 'Scan Name:\n - {}\n'.format(scan_name)) + self.textbox.insert(tk.END, 'RepetitionTime:\n - {} 
msec\n'.format(tr)) + self.textbox.insert(tk.END, 'EchoTime:\n - {} msec\n'.format(te)) + self.textbox.insert(tk.END, 'FlipAngle:\n - {} degree\n\n'.format(flip_angle)) + if isinstance(pixel_bw, float): + self.textbox.insert(tk.END, 'PixelBandwidth:\n - {0:.3f} Hz\n'.format(pixel_bw)) + else: + self.textbox.insert(tk.END, 'PixelBandwidth:\n - {} Hz\n'.format(pixel_bw)) + self.textbox.insert(tk.END, 'Dimension:\n - {}D\n'.format(dim)) + self.textbox.insert(tk.END, 'Matrix size:\n - {}\n'.format(size)) + self.textbox.insert(tk.END, 'Number of SlicePacks:\n - {}\n'.format(n_slicepacks)) + self.textbox.insert(tk.END, 'FOV size:\n - {} (mm)\n\n'.format(fov_size)) + self.textbox.insert(tk.END, 'Spatial resolution:\n - {} ({})\n'.format(s_resol, s_unit)) + self.textbox.insert(tk.END, 'Temporal resolution:\n - {} ({})\n'.format(t_resol, t_unit)) + self.textbox.config(state=tk.DISABLED) \ No newline at end of file diff --git a/brkraw/app/viewer/scan_list.py b/brkraw/app/viewer/scan_list.py new file mode 100644 index 0000000..39ae206 --- /dev/null +++ b/brkraw/app/viewer/scan_list.py @@ -0,0 +1,73 @@ +import tkinter as tk +from .config import font + + +class ScanList(tk.Frame): + def __init__(self, *args, **kwargs): + super(ScanList, self).__init__(*args, **kwargs) + self._init_scanlist() + self._init_recolist() + self._init_buttons() + + def _init_scanlist(self): + self._scanlist_label = tk.Label(self, text='Scan ID / Protocol') + self._scanlist_label.pack(side=tk.TOP, fill=tk.X, pady=5) + self._scanlist_frame = tk.Frame(self) + self._scanlist_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True, padx=10) + self._scanlist= tk.Listbox(self._scanlist_frame, width=30, + exportselection=False) + self._scanlist.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) + self._set_scollbar(self._scanlist_frame, self._scanlist) + self._scanlist.config(font=font, state=tk.DISABLED) + self._scanlist_label.config(font=font) + + def _init_recolist(self): + self._recolist_label = tk.Label(self, text='Reco ID / DataType') + self._recolist_label.pack(side=tk.TOP, fill=tk.X, pady=5) + self._recolist_frame = tk.Frame(self, height=5) + self._recolist_frame.pack(side=tk.TOP, fill=tk.BOTH, padx=10) + self._recolist = tk.Listbox(self._recolist_frame, width=30, height=5, + exportselection=False) + self._recolist.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) + self._set_scollbar(self._recolist_frame, self._recolist) + self._recolist.config(font=font, state = tk.DISABLED) + self._recolist_label.config(font=font) + + def _init_buttons(self): + self._button_fm = tk.Frame(self) + self._button_fm.pack(side=tk.TOP, fill=tk.X) + self._updt_bt = tk.Button(self._button_fm, text='SetOutput') + self._conv_bt = tk.Button(self._button_fm, text='Convert') + self._updt_bt.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) + self._conv_bt.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) + self._updt_bt.config(state=tk.DISABLED, font=font) + self._conv_bt.config(state=tk.DISABLED, font=font) + + @staticmethod + def _set_scollbar(frame, listbox_obj): + scrollbar = tk.Scrollbar(frame, orient=tk.VERTICAL) + scrollbar.config(command=listbox_obj.yview) + scrollbar.pack(side=tk.RIGHT, fill="y") + listbox_obj.config(yscrollcommand=scrollbar.set) + + def load_data(self, brkraw_obj): + from brkraw.lib.utils import get_value + self._scanlist.config(state=tk.NORMAL) + for scan_id, recos in brkraw_obj._avail.items(): + visu_pars = brkraw_obj._get_visu_pars(scan_id, recos[0]) + protocol_name = get_value(visu_pars, 'VisuAcquisitionProtocol') + self._scanlist.insert(tk.END, 
'{}::{}'.format(str(scan_id).zfill(3), + protocol_name)) + self._scanlist.select_set(0) + + def _update_recos(self, brkraw_obj, scan_id): + from brkraw.lib.utils import get_value + self._recolist.config(state=tk.NORMAL) + recos = brkraw_obj._avail[scan_id] + self._recolist.delete(0, tk.END) + for reco_id in recos: + visu_pars = brkraw_obj._get_visu_pars(scan_id, reco_id) + frame_type = get_value(visu_pars, 'VisuCoreFrameType') + self._recolist.insert(tk.END, '{}::{}'.format(str(reco_id).zfill(3), + frame_type)) + self._recolist.select_set(0) diff --git a/brkraw/app/viewer/subj_info.py b/brkraw/app/viewer/subj_info.py new file mode 100644 index 0000000..56105c1 --- /dev/null +++ b/brkraw/app/viewer/subj_info.py @@ -0,0 +1,128 @@ +import tkinter as tk +from .config import font, button_size + + +class LabelItem(tk.Frame): + def __init__(self, *args, **kwargs): + super(LabelItem, self).__init__(*args, **kwargs) + + def set_label(self, text): + self.label = tk.Label(self, text=text, width=8, anchor=tk.CENTER) + self.entry = tk.Entry(self) + self.label.pack(side=tk.LEFT, fill=tk.X, + anchor=tk.W, ipadx=5) + self.entry.pack(side=tk.LEFT, fill=tk.X, + anchor=tk.W, ipadx=5) + self.label.configure(font=font) + self.entry.config(width=16, font=font) + + def set_entry(self, text): + self.entry.config(state=tk.NORMAL) + self.entry.delete(0, tk.END) + if text == None: + self.entry.insert(tk.END, '') + self.entry.config(state=tk.DISABLED) + else: + self.entry.insert(tk.END, text) + self.entry.config(state="readonly") + + +class SubjInfo(tk.Frame): + def __init__(self, *args, **kwargs): + super(SubjInfo, self).__init__(*args, **kwargs) + self._init_layout() + self.config(padx=10) + + def _init_layout(self): + self._upper_frame = tk.Frame(self) + self._upper_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True, anchor=tk.CENTER) + self._init_upper_frame() + + def _extend_layout(self): + self._path_label = tk.Label(self._upper_frame, text='DataPath', + width=button_size, font=font) + self._path_label.pack(side=tk.LEFT, anchor=tk.E) + # self._close = tk.Button(self._upper_frame, text='Close', + # font=font, width=button_size) + # self._close.pack(side=tk.RIGHT) + self._refresh = tk.Button(self._upper_frame, text='Refresh', + font=font, width=button_size) + self._refresh.pack(side=tk.RIGHT) + self._path = tk.Text(self._upper_frame, height=1, font=font) + self._path.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, anchor=tk.CENTER) + self._path.config(state=tk.DISABLED) + + self._main_frame = tk.Frame(self) + self._main_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True, anchor=tk.CENTER) + self._init_main_frame() + + def _set_path(self, brkraw_obj): + self._path.config(state=tk.NORMAL) + self._path.insert(tk.END, brkraw_obj._pvobj.path) + self._path.config(state=tk.DISABLED) + + def _clean_path(self): + self._path.config(state=tk.NORMAL) + self._path.delete(1.0, tk.END) + self._path.config(state=tk.DISABLED) + + def _init_upper_frame(self): + self._loadfile = tk.Button(self._upper_frame, text='Open File', + font=font, width=button_size) + self._loaddir = tk.Button(self._upper_frame, text='Open Directory', + font=font, width=button_size) + self._loadfile.pack(side=tk.LEFT) + self._loaddir.pack(side=tk.LEFT) + + def _init_main_frame(self): + self._c0 = tk.Frame(self._main_frame) + self._c0.pack(side=tk.LEFT, fill=tk.X, anchor=tk.NW) + self._c1 = tk.Frame(self._main_frame) + self._c1.pack(side=tk.LEFT, fill=tk.X, anchor=tk.NW) + self._c2 = tk.Frame(self._main_frame) + self._c2.pack(side=tk.LEFT, fill=tk.X, 
+                      anchor=tk.NW)
+        self._c3 = tk.Frame(self._main_frame)
+        self._c3.pack(side=tk.LEFT, fill=tk.X, anchor=tk.NW)
+        self._init_labelitems()
+
+    @staticmethod
+    def _set_labelitem(frame, label, text=None):
+        item = LabelItem(frame)
+        item.pack(side=tk.TOP)
+        item.set_label(label)
+        item.set_entry(text)
+        return item
+
+    def _init_labelitems(self):
+        self._account = self._set_labelitem(self._c0, 'Account')
+        self._scandate = self._set_labelitem(self._c0, 'Scan Date')
+        self._researcher = self._set_labelitem(self._c0, 'Researcher')
+        self._subjectid = self._set_labelitem(self._c1, 'Subject ID')
+        self._sessionid = self._set_labelitem(self._c1, 'Session ID')
+        self._studyid = self._set_labelitem(self._c1, 'Study ID')
+        self._dob = self._set_labelitem(self._c2, 'DOB')
+        self._sex = self._set_labelitem(self._c2, 'Sex')
+        self._weight = self._set_labelitem(self._c2, 'Weight')
+        self._type = self._set_labelitem(self._c3, 'Type')
+        self._position = self._set_labelitem(self._c3, 'Position')
+        self._entry = self._set_labelitem(self._c3, 'Entry')
+
+    def load_data(self, brkraw_obj):
+        try:
+            datetime = brkraw_obj.get_scan_time()
+        except Exception:
+            datetime = dict(date='N/A', start_time='N/A')
+        pvobj = brkraw_obj._pvobj
+        self._account.set_entry(pvobj.user_account)
+        self._researcher.set_entry(pvobj.user_name)
+        self._scandate.set_entry('{}, {}'.format(datetime['date'], datetime['start_time']))
+        self._subjectid.set_entry(pvobj.subj_id)
+        self._sessionid.set_entry(pvobj.session_id)
+        self._studyid.set_entry(pvobj.study_id)
+        self._dob.set_entry(pvobj.subj_dob)
+        self._sex.set_entry(pvobj.subj_sex)
+        self._weight.set_entry('{} kg'.format(pvobj.subj_weight))
+        self._type.set_entry(pvobj.subj_type)
+        self._position.set_entry(pvobj.subj_pose)
+        self._entry.set_entry(pvobj.subj_entry)
+        self._set_path(brkraw_obj)
\ No newline at end of file
diff --git a/brkraw/config.py b/brkraw/config.py
deleted file mode 100644
index 545d23e..0000000
--- a/brkraw/config.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import toml
-from pathlib import Path
-from brkraw import __version__
-
-class ConfigManager:
-    """
-    Manage the configuration settings.
-
-    Notes:
-        - Provides methods to ensure the existence of the config directory, load or create the configuration, set configuration values, and retrieve configuration values.
-    """
-    def __init__(self):
-        """
-        Initialize the configuration object.
-
-        Notes:
-            - Sets up the home directory, config directory, and config file paths.
-            - Ensures the existence of the config directory and loads or creates the configuration.
-        """
-        self.home_dir = Path.home()
-        self.config_dir = self.home_dir / '.brkraw'
-        self.config_file = self.config_dir / 'config.toml'
-        self.ensure_config_dir_exists()
-        self.load_or_create_config()
-
-    def ensure_config_dir_exists(self):
-        """
-        Ensure the existence of the configuration directory.
-
-        Notes:
-            - Creates the config directory if it does not already exist.
-            - Also creates 'plugin' and 'bids' directories within the config directory.
-        """
-        if not self.config_dir.exists():
-            self.config_dir.mkdir()
-            (self.config_dir / 'plugin').mkdir()
-            (self.config_dir / 'bids').mkdir()
-
-    def load_or_create_config(self):
-        """
-        Load an existing configuration file or create a new one if it does not exist.
-
-        Notes:
-            - If the config file does not exist, a default configuration is created and saved.
-            - Otherwise, the existing configuration is loaded from the file.
-        """
-        if not self.config_file.exists():
-            default_config = {
-                'version': __version__
-            }
-            with open(self.config_file, 'w') as f:
-                toml.dump(default_config, f)
-            self.config = default_config
-        else:
-            with open(self.config_file, 'r') as f:
-                self.config = toml.load(f)
-
-    def set(self, key, value):
-        """
-        Set a key-value pair in the configuration and save the updated configuration to the file.
-
-        Args:
-            key: The key to set in the configuration.
-            value: The value to associate with the key.
-
-        Notes:
-            - Updates the configuration with the provided key-value pair.
-            - Persists the updated configuration to the config file.
-        """
-        self.config[key] = value
-        with open(self.config_file, 'w') as f:
-            toml.dump(self.config, f)
-
-    def get(self, key):
-        """
-        Retrieve the value associated with the given key from the configuration.
-
-        Args:
-            key: The key to retrieve the value for.
-
-        Returns:
-            The value associated with the key in the configuration, or None if the key is not found.
-
-        Notes:
-            - Returns the value corresponding to the provided key from the configuration.
-        """
-        return self.config.get(key)
diff --git a/brkraw/config.yaml b/brkraw/config.yaml
new file mode 100644
index 0000000..29b69fe
--- /dev/null
+++ b/brkraw/config.yaml
@@ -0,0 +1,15 @@
+xnippet:
+  repo:
+    - name: brkraw-snippets
+      url: https://github.com/brkraw/brkraw-snippets.git
+  plugin:
+    path: plugin
+
+app:
+  tonifti:
+    output_filename:
+      recipe: "@brkraw-snippets,better-tonifti:studyinfo"
+      format: ___
+      spec: null
+  studyinfo:
+    recipe: brkraw-snippets:studyinfo
\ No newline at end of file
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 0000000..42aaab3
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.10
+ignore_missing_imports = True
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index b56781d..4b63c30 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,36 +10,51 @@ classifiers = [
     'Natural Language :: English',
 ]
 dependencies = [
-    'nibabel>=3.0.2',
+    'pyyaml>=6.0.1',
     'numpy>=1.18.0',
-    'pandas>=1.0.0',
-    'pillow>=7.1.1',
     'tqdm>=4.45.0',
+    'xnippet>=0.1.0',
+    'nibabel>=3.0.2',
+    'pandas>=1.0.0',
     'openpyxl>=3.0.3',
     'xlrd>=1.0.0',
-    'toml>=0.10.2'
 ]
 description = "Bruker PvDataset Loader"
 license = {text = "GNLv3"}
 dynamic = ["version"]
-maintainers = [{name = "SungHo Lee", email = 'shlee@unc.edu'}]
+maintainers = [
+    {name = "SungHo Lee", email = 'shlee@unc.edu'}
+    ]
 name = "brkraw"
 readme = "README.md"
 requires-python = ">=3.7"
-keywords = ['bruker', 'data_handler', 'converter', 'administrator_tool']
+keywords = [
+    'bruker',
+    'data_handler',
+    'converter',
+    'administrator_tool',
+    'extensible',
+    'xoani'
+    ]
 
 [project.urls]
-Homepage = "https://github.com/brkraw/brkraw"
+Homepage = "https://brkraw.github.io"
 
 [project.optional-dependencies]
-SimpleITK = [
+legacy = [
     'SimpleITK>=1.2.4'
-]
+    ]
+
+viewer = [
+    'pillow>=7.1.1'
+    ]
+
 dev = [
     "flake8",
     "pytest",
-    "nbmake"
-]
+    "nbmake",
+    "types-PyYAML"
+    ]
 
 [tool.hatch.version]
 path = "brkraw/__init__.py"
diff --git a/tests/01_api_pvobj_test.py b/tests/01_api_pvobj_test.py
new file mode 100644
index 0000000..3e2a835
--- /dev/null
+++ b/tests/01_api_pvobj_test.py
@@ -0,0 +1,19 @@
+# def test_loading(dataset):
+#     scan_contents = ['method', 'acqp']
+#     reco_contents = ['2dseq', 'visu_pars', 'reco']
+
+#     for v, subset in dataset.items():
+#         print(f'- v{v}:')
+#         for fname, rawobj in subset.items():
+#             print(f'  + testing {fname}')
+#             for scan_id in rawobj.avail:
+#                 scanobj = rawobj.get_scan(scan_id)
+#                 failed = sum([int(f in scan_contents) for f in scanobj._contents['files']]) < len(scan_contents)
+#                 if failed:
+#                     print(f'    - [{scan_id}] object does not contain all {scan_contents}')
+#                 else:
+#                     for reco_id in scanobj.avail:
+#                         recoobj = scanobj.get_reco(reco_id)
+#                         failed = sum([int(f in reco_contents) for f in recoobj.contents['files']]) < len(reco_contents)
+#                         if failed:
+#                             print(f'    - [{scan_id}][{reco_id}] object does not contain all {reco_contents}')
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..e40982f
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,40 @@
+import pytest
+import re
+from pathlib import Path
+from brkraw.api.pvobj import PvStudy
+
+# helper functions
+def get_version(raw):
+    ptrn = r'^[a-zA-Z]*[ -]?(?P<version>\d+\.\d+(?:\.\d+)?)'
+    for scan_id in raw.avail:
+        pvscan = raw.get_scan(scan_id)
+        if version := pvscan.acqp.get('ACQ_sw_version'):
+            if matched := re.match(ptrn, version):
+                return matched.groupdict()['version']
+
+def check_contents(path: Path):
+    if path.is_dir():
+        if any(e.is_dir() and e.name.isdigit() for e in path.iterdir()):
+            return PvStudy(path)
+        for e in path.iterdir():
+            if found := check_contents(e):
+                return found
+    elif path.is_file():
+        if path.name.endswith('.zip'):
+            return PvStudy(path)
+
+@pytest.fixture
+def dataset():
+    return get_dataset()
+
+def get_dataset():
+    dataset_path = Path('/mnt/nfs/active/Xoani_Lee_Package-dev/playground/brkraw_dev')
+
+    dataset = {}
+    for contents in dataset_path.iterdir():
+        if raw := check_contents(contents):
+            if version := get_version(raw):
+                if version not in dataset:
+                    dataset[version] = {}
+                dataset[version][raw.path.name] = raw
+    return dataset
\ No newline at end of file
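Usage note: the `dataset` fixture above returns a nested mapping of ParaVision version strings to {dataset name: PvStudy} dictionaries. A minimal sketch of a test consuming it is shown below; the test module name and the assertion are illustrative assumptions and not part of this patch, while `avail` and `get_scan` follow the access pattern already used in conftest.py and the commented-out tests/01_api_pvobj_test.py.

    # hypothetical test module, e.g. tests/02_smoke_test.py (not part of this patch)
    def test_scan_loading(dataset):
        # `dataset` maps ParaVision version -> {dataset name: PvStudy}
        for version, subset in dataset.items():
            for name, study in subset.items():
                for scan_id in study.avail:
                    # mirror the accessor pattern used in conftest.py
                    scan = study.get_scan(scan_id)
                    assert scan is not None, \
                        f'[v{version}] {name}: failed to load scan {scan_id}'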