Skip to content

Commit

Permalink
Merge pull request #192 from mattjala/fstrings
Browse files Browse the repository at this point in the history
Replace Python 2-style percent (%) string formatting with Python 3 f-strings
  • Loading branch information
mattjala authored May 1, 2024
2 parents d79fbfb + 7b9a2e0 commit 4885855
Show file tree
Hide file tree
Showing 14 changed files with 63 additions and 73 deletions.
6 changes: 3 additions & 3 deletions h5pyd/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,12 @@


__doc__ = \
"""
f"""
This is the h5pyd package, a Python interface to the HDF REST Server.
Version %s
Version {version.version}
""" % (version.version)
"""


def enable_ipython_completer():
Expand Down
2 changes: 1 addition & 1 deletion h5pyd/_apps/hsget.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
import h5py
import h5pyd
except ImportError as e:
sys.stderr.write("ERROR : %s : install it to use this utility...\n" % str(e))
sys.stderr.write(f"ERROR : {str(e)} : install it to use this utility...\n")
sys.exit(1)

if __name__ == "__main__":
Expand Down
7 changes: 3 additions & 4 deletions h5pyd/_hl/attrs.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,8 +222,7 @@ def create(self, name, data, shape=None, dtype=None):

if is_complex:
raise TypeError(
'Wrong committed datatype for complex numbers: %s' %
dtype.name)
f'Wrong committed datatype for complex numbers: {dtype.name}')
elif dtype is None:
if data.dtype.kind == 'U':
# use vlen for unicode strings
Expand All @@ -243,7 +242,7 @@ def create(self, name, data, shape=None, dtype=None):

# Make sure the subshape matches the last N axes' sizes.
if shape[-len(subshape):] != subshape:
raise ValueError("Array dtype shape %s is incompatible with data shape %s" % (subshape, shape))
raise ValueError(f"Array dtype shape {subshape} is incompatible with data shape {shape}")

# New "advertised" shape and dtype
shape = shape[0:len(shape) - len(subshape)]
Expand Down Expand Up @@ -383,4 +382,4 @@ def __contains__(self, name):
def __repr__(self):
if not self._parent.id.id:
return "<Attributes of closed HDF5 object>"
return "<Attributes of HDF5 object at %s>" % id(self._parent.id)
return f"<Attributes of HDF5 object at {id(self._parent.id)}>"
21 changes: 10 additions & 11 deletions h5pyd/_hl/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def readtime_dtype(basetype, names):
elif itemsize == 8:
return numpy.dtype(numpy.complex64)
else:
TypeError("Unsupported dtype for complex numbers: %s" % basetype)
TypeError(f"Unsupported dtype for complex numbers: {basetype}")

if len(names) == 0: # Not compound, or we want all fields
return basetype
Expand All @@ -60,7 +60,7 @@ def readtime_dtype(basetype, names):

for name in names: # Check all names are legal
if name not in basetype.names:
raise ValueError("Field %s does not appear in this type." % name)
raise ValueError(f"Field {name} does not appear in this type.")

return numpy.dtype([(name, basetype.fields[name][0]) for name in names])

Expand Down Expand Up @@ -721,7 +721,7 @@ def __init__(self, bind):
"""Create a new Dataset object by binding to a low-level DatasetID."""

if not isinstance(bind, DatasetID):
raise ValueError("%s is not a DatasetID" % bind)
raise ValueError(f"{bind} is not a DatasetID")
HLObject.__init__(self, bind)

self._dcpl = self.id.dcpl_json
Expand Down Expand Up @@ -781,7 +781,7 @@ def resize(self, size, axis=None):

if axis is not None:
if not (axis >= 0 and axis < self.id.rank):
raise ValueError("Invalid axis (0 to %s allowed)" % (self.id.rank - 1))
raise ValueError(f"Invalid axis (0 to {self.id.rank - 1} allowed)")
try:
newlen = int(size)
except TypeError:
Expand Down Expand Up @@ -1404,8 +1404,7 @@ def __setitem__(self, args, val):
):
if self.dtype.kind != "V" or self.dtype.names != ("r", "i"):
raise TypeError(
"Wrong dataset dtype for complex number values: %s"
% self.dtype.fields
f"Wrong dataset dtype for complex number values: {self.dtype.fields}"
)
if isinstance(val, complex):
val = numpy.asarray(val, dtype=type(val))
Expand All @@ -1425,7 +1424,7 @@ def __setitem__(self, args, val):
if len(names) == 1 and self.dtype.fields is not None:
# Single field selected for write, from a non-array source
if not names[0] in self.dtype.fields:
raise ValueError("No such field for indexing: %s" % names[0])
raise ValueError(f"No such field for indexing: {names[0]}")
dtype = self.dtype.fields[names[0]][0]
cast_compound = True
else:
Expand Down Expand Up @@ -1456,8 +1455,8 @@ def __setitem__(self, args, val):
shp = self.dtype.subdtype[1] # type shape
valshp = val.shape[-len(shp):]
if valshp != shp: # Last dimension has to match
raise TypeError("When writing to array types,\
last N dimensions have to match (got %s, but should be %s)" % (valshp, shp,))
raise TypeError(f"When writing to array types,\
last N dimensions have to match (got {valshp}, but should be {shp})")
mtype = h5t.py_create(numpy.dtype((val.dtype, shp)))
mshape = val.shape[0:len(val.shape)-len(shp)]
"""
Expand All @@ -1469,8 +1468,8 @@ def __setitem__(self, args, val):
raise TypeError("Illegal slicing argument (not a compound dataset)")
mismatch = [x for x in names if x not in self.dtype.fields]
if len(mismatch) != 0:
mismatch = ", ".join('"%s"' % x for x in mismatch)
raise ValueError("Illegal slicing argument (fields %s not in dataset type)" % mismatch)
mismatch = ", ".join(f"{x}" for x in mismatch)
raise ValueError(f"Illegal slicing argument (fields {mismatch} not in dataset type)")

# Use mtype derived from array (let DatasetID.write figure it out)
else:
Expand Down
4 changes: 2 additions & 2 deletions h5pyd/_hl/datatype.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def __init__(self, bind):
"""
if not isinstance(bind, TypeID):
# todo: distinguish type from other hl objects
raise ValueError("%s is not a TypeID" % bind)
raise ValueError(f"{bind} is not a TypeID")
HLObject.__init__(self, bind)

self._dtype = createDataType(self.id.type_json)
Expand All @@ -55,7 +55,7 @@ def __repr__(self):
namestr = '("anonymous")'
else:
name = pp.basename(pp.normpath(self.name))
namestr = '"%s"' % (name if name != '' else '/')
namestr = f"{name if name != '' else '/'}"
if name:
namestr = f'"{name}"'
else:
Expand Down
2 changes: 1 addition & 1 deletion h5pyd/_hl/dims.py
Original file line number Diff line number Diff line change
Expand Up @@ -387,7 +387,7 @@ def __iter__(self):
def __repr__(self):
if not self._id:
return '<Dimensions of closed HDF5 dataset>'
return '<Dimensions of HDF5 dataset at %s>' % self._id
return f'<Dimensions of HDF5 dataset at {self._id}>'

def create_scale(self, dset, name=''):
''' Create a new dimension, from an initial scale.
Expand Down
8 changes: 4 additions & 4 deletions h5pyd/_hl/group.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def __init__(self, bind, **kwargs):
"""

if not isinstance(bind, GroupID):
raise ValueError("%s is not a GroupID" % bind)
raise ValueError(f"{bind} is not a GroupID")
HLObject.__init__(self, bind, **kwargs)
self._req_prefix = "/groups/" + self.id.uuid
self._link_db = {} # cache for links
Expand Down Expand Up @@ -544,7 +544,7 @@ def require_group(self, name):
return self.create_group(name)
grp = self[name]
if not isinstance(grp, Group):
raise TypeError("Incompatible object (%s) already exists" % grp.__class__.__name__)
raise TypeError(f"Incompatible object ({grp.__class__.__name__}) already exists")
return grp

def getObjByUuid(self, uuid, collection_type=None):
Expand Down Expand Up @@ -1165,7 +1165,7 @@ def __init__(self, path):
self._path = str(path)

def __repr__(self):
return '<SoftLink to "%s">' % self.path
return f'<SoftLink to "{self.path}">'


class ExternalLink(object):
Expand All @@ -1188,7 +1188,7 @@ def __init__(self, filename, path):
self._path = str(path)

def __repr__(self):
return '<ExternalLink to "%s" in file "%s">' % (self.path, self.filename)
return f'<ExternalLink to "{self.path}" in file "{self.filename}">'


class UserDefinedLink(object):
Expand Down
4 changes: 2 additions & 2 deletions h5pyd/_hl/h5type.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ def special_dtype(**kwds):

return dt

raise TypeError('Unknown special type "%s"' % name)
raise TypeError(f'Unknown special type "{name}"')


def check_vlen_dtype(dt):
Expand Down Expand Up @@ -265,7 +265,7 @@ class (either Reference or RegionReference). Returns None if the dtype
name, dt = kwds.popitem()

if name not in ('vlen', 'enum', 'ref'):
raise TypeError('Unknown special type "%s"' % name)
raise TypeError(f'Unknown special type "{name}"')

try:
return dt.metadata[name]
Expand Down
8 changes: 4 additions & 4 deletions h5pyd/_hl/selections.py
Original file line number Diff line number Diff line change
Expand Up @@ -400,7 +400,7 @@ def broadcast(self, target_shape):
"""
if self._shape == ():
if np.product(target_shape) != 1:
raise TypeError("Can't broadcast %s to scalar" % target_shape)
raise TypeError(f"Can't broadcast {target_shape} to scalar")
self._id.select_all()
yield self._id
return
Expand All @@ -419,7 +419,7 @@ def broadcast(self, target_shape):
if t == 1 or count[-idx] == t:
tshape.append(t)
else:
raise TypeError("Can't broadcast %s -> %s" % (target_shape, count))
raise TypeError(f"Can't broadcast {target_shape} -> {count}")
tshape.reverse()
tshape = tuple(tshape)

Expand Down Expand Up @@ -708,7 +708,7 @@ def guess_shape(sid):
return tuple()

elif sel_class != 'H5S_SIMPLE':
raise TypeError("Unrecognized dataspace class %s" % sel_class)
raise TypeError(f"Unrecognized dataspace class {sel_class}")

# We have a "simple" (rank >= 1) dataspace

Expand All @@ -727,7 +727,7 @@ def guess_shape(sid):
return (N,)

elif sel_type != H5S_SELECT_HYPERSLABS:
raise TypeError("Unrecognized selection method %s" % sel_type)
raise TypeError(f"Unrecognized selection method {sel_type}")

# We have a hyperslab-based selection

Expand Down
4 changes: 2 additions & 2 deletions h5pyd/_hl/table.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def __init__(self, bind):
"""

if not isinstance(bind, DatasetID):
raise ValueError("%s is not a DatasetID" % bind)
raise ValueError(f"{bind} is not a DatasetID")
Dataset.__init__(self, bind)

if len(self._dtype) < 1:
Expand Down Expand Up @@ -144,7 +144,7 @@ def readtime_dtype(basetype, names):

for name in names: # Check all names are legal
if name not in basetype.names:
raise ValueError("Field %s does not appear in this type." % name)
raise ValueError(f"Field {name} does not appear in this type.")

return numpy.dtype([(name, basetype.fields[name][0]) for name in names])

Expand Down
28 changes: 10 additions & 18 deletions h5pyd/version.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,26 +31,18 @@
api_version_tuple = (0, 18, 0)
api_version = "0.18.0"

__doc__ = """\
This is h5pyd **%s**
__doc__ = f"""\
This is h5pyd **{version}**
""" % (
version
)
"""

info = """\
info = f"""\
Summary of the h5pyd configuration
---------------------------------
h5pyd %(h5pyd)s
Python %(python)s
sys.platform %(platform)s
sys.maxsize %(maxsize)s
numpy %(numpy)s
""" % {
"h5pyd": version,
"python": sys.version,
"platform": sys.platform,
"maxsize": sys.maxsize,
"numpy": numpy.__version__,
}
h5pyd {version}
Python {sys.version}
sys.platform {sys.platform}
sys.maxsize {sys.maxsize}
numpy {numpy.__version__}
"""
20 changes: 10 additions & 10 deletions test/apps/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,15 +113,15 @@ def assertSameElements(self, a, b):
if x == y:
match = True
if not match:
raise AssertionError("Item '%s' appears in a but not b" % x)
raise AssertionError(f"Item '{x}' appears in a but not b")

for x in b:
match = False
for y in a:
if x == y:
match = True
if not match:
raise AssertionError("Item '%s' appears in b but not a" % x)
raise AssertionError(f"Item '{x}' appears in b but not a")

def assertArrayEqual(self, dset, arr, message=None, precision=None):
""" Make sure dset and arr have the same shape, dtype and contents, to
Expand All @@ -134,41 +134,41 @@ def assertArrayEqual(self, dset, arr, message=None, precision=None):
if message is None:
message = ''
else:
message = ' (%s)' % message
message = f' ({message})'

if np.isscalar(dset) or np.isscalar(arr):
self.assertTrue(
np.isscalar(dset) and np.isscalar(arr),
'Scalar/array mismatch ("%r" vs "%r")%s' % (dset, arr, message)
f'Scalar/array mismatch ("{dset}" vs "{arr}"){message}'
)
self.assertTrue(
dset - arr < precision,
"Scalars differ by more than %.3f%s" % (precision, message)
f"Scalars differ by more than {precision:.3}{message}"
)
return

self.assertTrue(
dset.shape == arr.shape,
"Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message)
f"Shape mismatch ({dset.shape} vs {arr.shape}){message}"
)
self.assertTrue(
dset.dtype == arr.dtype,
"Dtype mismatch (%s vs %s)%s" % (dset.dtype, arr.dtype, message)
f"Dtype mismatch ({dset.dtype} vs {arr.dtype}){message}"
)

if arr.dtype.names is not None:
for n in arr.dtype.names:
message = '[FIELD %s] %s' % (n, message)
message = f'[FIELD {n}] {message}'
self.assertArrayEqual(dset[n], arr[n], message=message, precision=precision)
elif arr.dtype.kind in ('i', 'f'):
self.assertTrue(
np.all(np.abs(dset[...] - arr[...]) < precision),
"Arrays differ by more than %.3f%s" % (precision, message)
f"Arrays differ by more than {precision:.3}{message}"
)
else:
self.assertTrue(
np.all(dset[...] == arr[...]),
"Arrays are not equal (dtype %s) %s" % (arr.dtype.str, message)
f"Arrays are not equal (dtype {arr.dtype.str}) {message}"
)

def assertNumpyBehavior(self, dset, arr, s):
Expand Down
Loading

0 comments on commit 4885855

Please sign in to comment.