Skip to content

Commit

Permalink
NLPAR Chunk Patch ... again (#62)
Browse files Browse the repository at this point in the history
Fixed
-----
- This time I think that the edge case for NLPAR chunking of scans is really fixed.
- Wrote on a chalkboard 100 times, "I will run ALL the unit tests before release."

Signed-off-by: David Rowenhorst <[email protected]>
  • Loading branch information
drowenhorst-nrl authored Jun 7, 2024
1 parent 5d84da3 commit 93ee812
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 3 deletions.
9 changes: 9 additions & 0 deletions CHANGELOG.rst
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,15 @@ Changelog
All notable changes to PyEBSDIndex will be documented in this file. The format is based
on `Keep a Changelog <https://keepachangelog.com/en/1.1.0>`_.


0.3.4 (2024-06-07)
==================

Fixed
-----
- This time I think that the edge case for NLPAR chunking of scans is really fixed.
- Wrote on a chalkboard 100 times, "I will run ALL the unit tests before release."

0.3.3 (2024-06-07)
==================

Expand Down
2 changes: 1 addition & 1 deletion pyebsdindex/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
]
__description__ = "Python based tool for Radon based EBSD indexing"
__name__ = "pyebsdindex"
__version__ = "0.3.3"
__version__ = "0.3.4"


# Try to import only once - also will perform check that at least one GPU is found.
Expand Down
20 changes: 18 additions & 2 deletions pyebsdindex/nlpar_cpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -390,6 +390,7 @@ def calcnlpar(self, chunksize=0, searchradius=None, lam = None, dthresh = None,

nthreadpos = numba.get_num_threads()
#numba.set_num_threads(18)
#numba.set_num_threads(18)
colstartcount = np.asarray([0,ncols],dtype=np.int64)
if verbose >= 1:
print("lambda:", self.lam, "search radius:", self.searchradius, "dthresh:", self.dthresh)
Expand Down Expand Up @@ -776,14 +777,29 @@ def _calcchunks(self, patdim, ncol, nrow, target_bytes=2e9, col_overlap=0, row_o
colchunks[-1, 1] = ncol

if ncolchunks > 1:
colchunks[-1, 0] = max(0, colchunks[-2, 1] - col_overlap)
colchunks[-1, 0] = max(0, colchunks[-2, 1] - 2*col_overlap-1)

colchunks += col_offset

# for i in range(ncolchunks - 1):
# if colchunks[i + 1, 0] >= ncol:
# colchunks = colchunks[0:i + 1, :]

rowchunks = []
row_overlap = int(row_overlap)
for r in range(nrowchunks):
rchunk = [int(r * rowstep) - row_overlap, int(r * rowstep + rowstepov) - row_overlap]
rowchunks.append(rchunk)
if rchunk[1] > nrow:
break

nrowchunks = len(rowchunks)
rowchunks = np.array(rowchunks, dtype=int)
rowchunks[0, 0] = 0
# for i in range(ncolchunks - 1):
# if colchunks[i + 1, 0] >= ncol:
# colchunks = colchunks[0:i + 1, :]

rowchunks = []
row_overlap = int(row_overlap)
for r in range(nrowchunks):
Expand All @@ -798,7 +814,7 @@ def _calcchunks(self, patdim, ncol, nrow, target_bytes=2e9, col_overlap=0, row_o
rowchunks[-1, 1] = nrow

if nrowchunks > 1:
rowchunks[-1, 0] = max(0, rowchunks[-2, 1] - row_overlap)
rowchunks[-1, 0] = max(0, rowchunks[-2, 1] - 2*row_overlap-1)

rowchunks += row_offset

Expand Down

0 comments on commit 93ee812

Please sign in to comment.