forked from omarocegueda/registration
-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathTransformationModel.py
318 lines (301 loc) · 13.2 KB
/
TransformationModel.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
'''
Definition of the TransformationModel class, which is the composition of
an affine pre-aligning transformation followed by a nonlinear transformation
followed by an affine post-multiplication.
'''
import numpy as np
import tensorFieldUtils as tf
import numpy.linalg as linalg
def scale_affine(affine, factor):
    r'''
    Returns a copy of `affine` whose translation column has been multiplied
    by `factor`. Useful when an affine transformation was computed for an
    image I and must be applied to an upsampled/downsampled version J of I:
    the linear part stays the same, only the translation is rescaled.
    '''
    rescaled = affine.copy()
    last_col = affine.shape[1] - 1
    # translation lives in the last column, excluding the homogeneous row
    rescaled[:last_col, last_col] = factor * rescaled[:last_col, last_col]
    return rescaled
class TransformationModel(object):
    '''
    Composition of an affine pre-alignment, a nonlinear displacement field
    and an affine post-multiplication. Maps points between two spaces:
    "reference space" and "target space".
    Forward: maps target to reference, y=affine_post*forward(affine_pre*x)
    Backward: maps reference to target,
    x = affine_pre^{-1}*backward(affine_post^{-1}*y)
    '''
    def __init__(self,
                 forward = None,
                 backward = None,
                 affine_pre = None,
                 affine_post = None):
        r'''
        Builds the model from its (optional) components. Each setter
        derives self.dim from the component it receives, so dim stays
        None until some non-None component is set.
        '''
        self.dim = None
        self.set_forward(forward)
        self.set_backward(backward)
        self.set_affine_pre(affine_pre)
        self.set_affine_post(affine_post)

    def set_affine_pre(self, affine_pre):
        r'''
        Establishes the pre-multiplication affine matrix of this
        transformation, computes its inverse and adjusts the dimension of
        the transformation's domain accordingly.
        '''
        # BUGFIX: 'affine_pre != None' is an elementwise comparison on numpy
        # arrays (ValueError in boolean context); use an identity test
        if affine_pre is not None:
            self.dim = affine_pre.shape[1] - 1
            self.affine_pre_inv = linalg.inv(affine_pre).copy(order='C')
        else:
            self.affine_pre_inv = None
        self.affine_pre = affine_pre

    def set_affine_post(self, affine_post):
        r'''
        Establishes the post-multiplication affine matrix of this
        transformation, computes its inverse and adjusts the dimension of
        the transformation's domain accordingly.
        '''
        if affine_post is not None:
            self.dim = affine_post.shape[1] - 1
            self.affine_post_inv = linalg.inv(affine_post).copy(order='C')
        else:
            self.affine_post_inv = None
        self.affine_post = affine_post

    def set_forward(self, forward):
        r'''
        Establishes the forward non-linear displacement field and adjusts
        the dimension of the transformation's domain accordingly.
        '''
        if forward is not None:
            # field shape is (rows, cols[, slices], dim): rank minus one
            self.dim = len(forward.shape) - 1
        self.forward = forward

    def set_backward(self, backward):
        r'''
        Establishes the backward non-linear displacement field and adjusts
        the dimension of the transformation's domain accordingly.
        '''
        if backward is not None:
            self.dim = len(backward.shape) - 1
        self.backward = backward

    def _warp_function(self, image, nearest):
        r'''
        Selects the low-level warping routine matching the image rank
        (2D image vs 3D volume), dtype and interpolation scheme. Discrete
        (int32) images are always warped with nearest-neighbor
        interpolation, regardless of `nearest`.
        Raises ValueError for unsupported dtypes (previously the warp
        result was silently left undefined, causing UnboundLocalError).
        '''
        is_volume = len(image.shape) == 3
        # BUGFIX: compare dtypes with '==', not 'is' -- dtype-object
        # identity is an implementation detail of numpy's caching
        if image.dtype == np.dtype('int32'):
            if is_volume:
                return tf.warp_discrete_volumeNN
            return tf.warp_discrete_imageNN
        if image.dtype == np.dtype('float64'):
            if is_volume:
                return tf.warp_volumeNN if nearest else tf.warp_volume
            return tf.warp_imageNN if nearest else tf.warp_image
        raise ValueError('Unsupported image dtype: %s' % image.dtype)

    def warp_forward(self, image):
        r'''
        Applies this transformation in the forward direction to the given
        image using tri-linear interpolation.
        '''
        warp = self._warp_function(image, nearest=False)
        return np.array(
            warp(image, self.forward, self.affine_pre, self.affine_post))

    def warp_backward(self, image):
        r'''
        Applies this transformation in the backward direction to the given
        image using tri-linear interpolation.
        '''
        warp = self._warp_function(image, nearest=False)
        return np.array(
            warp(image, self.backward, self.affine_post_inv,
                 self.affine_pre_inv))

    def warp_forward_nn(self, image):
        r'''
        Applies this transformation in the forward direction to the given
        image using nearest-neighbor interpolation.
        '''
        warp = self._warp_function(image, nearest=True)
        return np.array(
            warp(image, self.forward, self.affine_pre, self.affine_post))

    def warp_backward_nn(self, image):
        r'''
        Applies this transformation in the backward direction to the given
        image using nearest-neighbor interpolation.
        '''
        warp = self._warp_function(image, nearest=True)
        return np.array(
            warp(image, self.backward, self.affine_post_inv,
                 self.affine_pre_inv))

    def scale_affines(self, factor):
        r'''
        Scales the pre- and post-multiplication affine matrices to be used
        with a scaled domain. It updates the inverses as well.
        '''
        if self.affine_pre is not None:
            self.affine_pre = scale_affine(self.affine_pre, factor)
            self.affine_pre_inv = linalg.inv(self.affine_pre).copy(order='C')
        if self.affine_post is not None:
            self.affine_post = scale_affine(self.affine_post, factor)
            self.affine_post_inv = linalg.inv(self.affine_post).copy(order='C')

    def upsample(self, new_domain_forward, new_domain_backward):
        r'''
        Upsamples the displacement fields and scales the affine
        pre- and post-multiplication affine matrices by a factor of 2. The
        final outcome is that this transformation can be used in an
        upsampled domain.
        '''
        if self.dim == 2:
            upsample_field = tf.upsample_displacement_field
        else:
            upsample_field = tf.upsample_displacement_field3D
        if self.forward is not None:
            # displacement magnitudes double because the grid spacing halves
            self.forward = 2 * np.array(
                upsample_field(
                    self.forward,
                    np.array(new_domain_forward).astype(np.int32)))
        if self.backward is not None:
            self.backward = 2 * np.array(
                upsample_field(
                    self.backward,
                    np.array(new_domain_backward).astype(np.int32)))
        self.scale_affines(2.0)

    def compute_inversion_error(self):
        r'''
        Returns the inversion error of the displacement fields
        TO-DO: the inversion error should take into account the affine
        transformations as well.
        '''
        if self.dim == 2:
            compose_fields = tf.compose_vector_fields
        else:
            compose_fields = tf.compose_vector_fields3D
        residual, stats = compose_fields(self.forward, self.backward)
        return residual, stats

    def compose(self, applyFirst):
        r'''
        Computes the composition G(F(.)) where G is this transformation and
        F is the transformation given as parameter.
        '''
        B = applyFirst.affine_post
        C = self.affine_pre
        # BUGFIX: identity tests instead of '=='/'!=' against None, which
        # are elementwise on numpy arrays
        if B is None:
            affine_prod = C
        elif C is None:
            affine_prod = B
        else:
            affine_prod = C.dot(B)
        if affine_prod is not None:
            affine_prod_inv = linalg.inv(affine_prod).copy(order='C')
        else:
            affine_prod_inv = None
        if self.dim == 2:
            append_affine = tf.append_affine_to_displacement_field_2d
            compose_fields = tf.compose_vector_fields
        else:
            append_affine = tf.append_affine_to_displacement_field_3d
            compose_fields = tf.compose_vector_fields3D
        # fold the combined mid-affine into F's forward field, then compose
        forward = applyFirst.forward.copy()
        append_affine(forward, affine_prod)
        forward, stats = compose_fields(forward, self.forward)
        # the backward composition uses the inverse mid-affine
        backward = self.backward.copy()
        append_affine(backward, affine_prod_inv)
        backward, stats = compose_fields(backward, applyFirst.backward)
        composition = TransformationModel(forward, backward,
                                          applyFirst.affine_pre,
                                          self.affine_post)
        return composition

    def inverse(self):
        r'''
        Return the inverse of this transformation model. Warning: the
        matrices and displacement fields are not copied.
        '''
        inv = TransformationModel(self.backward, self.forward,
                                  self.affine_post_inv, self.affine_pre_inv)
        return inv

    def consolidate(self):
        r'''
        Eliminates the affine transformations from the representation of
        this transformation by appending/prepending them to the deformation
        fields.
        '''
        if self.dim == 2:
            prepend_affine = tf.prepend_affine_to_displacement_field_2d
            append_affine = tf.append_affine_to_displacement_field_2d
        else:
            prepend_affine = tf.prepend_affine_to_displacement_field_3d
            append_affine = tf.append_affine_to_displacement_field_3d
        prepend_affine(self.forward, self.affine_pre)
        append_affine(self.forward, self.affine_post)
        prepend_affine(self.backward, self.affine_post_inv)
        append_affine(self.backward, self.affine_pre_inv)
        self.affine_post = None
        self.affine_pre = None
        self.affine_post_inv = None
        self.affine_pre_inv = None