Merge pull request #1168 from girder/parse-vendor-info
Parse qptiff and imagej vendor information.
manthey authored May 23, 2023
2 parents 3304446 + c0cb72e commit 16c2f91
Showing 3 changed files with 55 additions and 4 deletions.
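For reference, the raw vendor tags this commit parses can be inspected with tifffile directly. The sketch below is not part of the diff; the file names are placeholders, and it assumes only what the diff itself relies on: QPTIFF frames carry an XML ImageDescription rooted at PerkinElmer-QPI-ImageDescription with a Biomarker element, and ImageJ stacks carry per-slice Labels in the IJMetadata tag.

import xml.etree.ElementTree as ET

import tifffile

# QPTIFF: print the Biomarker recorded in each frame's ImageDescription XML.
with tifffile.TiffFile('slide.qptiff') as tf:  # placeholder path
    for page in tf.pages:
        if page.description and 'PerkinElmer-QPI-ImageDescription' in page.description:
            root = ET.fromstring(page.description)
            print(page.index, root.findtext('.//Biomarker'))

# ImageJ: per-slice labels live in the IJMetadata tag of the first page.
with tifffile.TiffFile('stack.tif') as tf:  # placeholder path
    ijm = tf.pages[0].tags.get('IJMetadata')
    if ijm is not None:
        print(ijm.value.get('Labels'))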
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,10 @@
# Change Log

## 1.21.1

### Improvements
- Parse qptiff and imagej vendor information ([#1168](../../pull/1168))

## 1.21.0

### Improvements
23 changes: 23 additions & 0 deletions sources/tiff/large_image_source_tiff/__init__.py
@@ -169,6 +169,7 @@ def __init__(self, path, **kwargs): # noqa
        self.sizeX = highest.imageWidth
        self.sizeY = highest.imageHeight
        self._checkForInefficientDirectories()
        self._checkForVendorSpecificTags()

    def _scanDirectories(self):
        lastException = None
@@ -347,6 +348,7 @@ def _initWithTiffTools(self): # noqa
            for idx in range(self.levels - 1)]
        self._tiffDirectories.append(dir0)
        self._checkForInefficientDirectories()
        self._checkForVendorSpecificTags()
        return True

    def _checkForInefficientDirectories(self, warn=True):
@@ -396,6 +398,27 @@ def _reorient_numpy_image(self, image, orientation):
            image = image[::, ::-1, ::]
        return image

    def _checkForVendorSpecificTags(self):
        if not hasattr(self, '_frames') or len(self._frames) <= 1:
            return
        if self._frames[0].get('frame', {}).get('IndexC'):
            return
        dir = self._tiffDirectories[-1]
        if not hasattr(dir, '_description_record'):
            return
        if dir._description_record.get('PerkinElmer-QPI-ImageDescription', {}).get('Biomarker'):
            channels = []
            for frame in range(len(self._frames)):
                dir = self._getDirFromCache(*self._frames[frame]['dirs'][-1])
                channels.append(dir._description_record.get(
                    'PerkinElmer-QPI-ImageDescription', {}).get('Biomarker'))
                if channels[-1] is None:
                    return
            self._frames[0]['channels'] = channels
            for idx, frame in enumerate(self._frames):
                frame.setdefault('frame', {})
                frame['frame']['IndexC'] = idx

    def _addAssociatedImage(self, largeImagePath, directoryNum, mustBeTiled=False, topImage=None):
        """
        Check if the specified TIFF directory contains an image with a sensible
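The practical effect in this source: a multi-frame QPTIFF whose directories carry Biomarker values is treated as a channel stack, with one IndexC per frame. A hypothetical usage sketch (the path is a placeholder, and it assumes the channel list is surfaced through the source's regular metadata plumbing):

import large_image_source_tiff

source = large_image_source_tiff.open('slide.qptiff')  # placeholder path
meta = source.getMetadata()
print(meta.get('channels'))         # Biomarker names, e.g. ['DAPI', 'Opal 520', ...]
print(len(meta.get('frames', [])))  # one frame per channel, each tagged with IndexC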
31 changes: 27 additions & 4 deletions sources/tifffile/large_image_source_tifffile/__init__.py
@@ -1,3 +1,4 @@
import json
import logging
import math
import os
@@ -76,6 +77,7 @@ class TifffileFileTileSource(FileTileSource, metaclass=LruCacheMetaclass):
    _tileSize = 512
    _minImageSize = 128
    _minTileSize = 128
    _singleTileSize = 1024
    _maxTileSize = 2048
    _minAssociatedImageSize = 64
    _maxAssociatedImageSize = 8192
@@ -102,6 +104,8 @@ def __init__(self, path, **kwargs):
        self.tileWidth = self.tileHeight = self._tileSize
        s = self._tf.series[maxseries]
        self._baseSeries = s
        if len(s.levels) == 1:
            self.tileWidth = self.tileHeight = self._singleTileSize
        page = s.pages[0]
        if ('TileWidth' in page.tags and
                self._minTileSize <= page.tags['TileWidth'].value <= self._maxTileSize):
@@ -243,6 +247,15 @@ def _findAssociatedImages(self):
                        max(entry['width'], entry['height']) >= self._minAssociatedImageSize):
                    self._associatedImages[id] = entry

    def _handle_imagej(self):
        try:
            ijm = self._tf.pages[0].tags['IJMetadata'].value
            if (ijm['Labels'] and len(ijm['Labels']) == self._framecount and
                    not getattr(self, '_channels', None)):
                self._channels = ijm['Labels']
        except Exception:
            pass

    def _handle_scn(self): # noqa
        """
        For SCN files, parse the xml and possibly adjust how associated images
@@ -359,7 +372,8 @@ def getInternalMetadata(self, **kwargs):
        pages.extend([page for page in self._tf.pages if page not in pagesInSeries])
        for page in pages:
            for tag in getattr(page, 'tags', []):
-                if tag.dtype_name == 'ASCII' and tag.value:
+                if (tag.dtype_name == 'ASCII' or (
+                        tag.dtype_name == 'BYTE' and isinstance(tag.value, dict))) and tag.value:
                    key = basekey = tag.name
                    suffix = 0
                    while key in result:
@@ -368,6 +382,13 @@
                        suffix += 1
                        key = '%s_%d' % (basekey, suffix)
                    result[key] = tag.value
                    if isinstance(result[key], dict):
                        result[key] = result[key].copy()
                        for subkey in list(result[key]):
                            try:
                                json.dumps(result[key][subkey])
                            except Exception:
                                del result[key][subkey]
        if hasattr(self, '_xml') and 'xml' not in result:
            result.pop('ImageDescription', None)
            result['xml'] = self._xml
@@ -420,11 +441,13 @@ def getTile(self, x, y, z, pilImageAllowed=False, numpyAllowed=False, **kwargs):
        if sidx not in self._zarrcache:
            if len(self._zarrcache) > 10:
                self._zarrcache = {}
-            self._zarrcache[sidx] = zarr.open(series.aszarr(), mode='r')
-        za = self._zarrcache[sidx]
+            za = zarr.open(series.aszarr(), mode='r')
+            hasgbs = hasattr(za[0], 'get_basic_selection')
+            self._zarrcache[sidx] = (za, hasgbs)
+        za, hasgbs = self._zarrcache[sidx]
        xidx = series.axes.index('X')
        yidx = series.axes.index('Y')
-        if hasattr(za[0], 'get_basic_selection'):
+        if hasgbs:
            bza = za[0]
            # we could cache this
            for ll in range(len(series.levels) - 1, 0, -1):
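Aside: the dict handling added to getInternalMetadata above follows a simple pattern: copy the tag's dict and drop any entry whose value does not survive json.dumps, so the internal metadata stays JSON-serializable. A standalone sketch of that pattern (not the library's code; the sample keys mimic ImageJ metadata):

import json

def json_safe_copy(d):
    # Return a shallow copy of d without values that cannot be JSON-encoded.
    safe = d.copy()
    for key in list(safe):
        try:
            json.dumps(safe[key])
        except Exception:
            del safe[key]
    return safe

print(json_safe_copy({'Labels': ['DAPI', 'CD8'], 'Ranges': (0.0, 255.0), 'LUTs': b'\x00\x01'}))
# -> {'Labels': ['DAPI', 'CD8'], 'Ranges': (0.0, 255.0)}  (the bytes value is dropped)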
