1"""
2================================================
3Applying image-based deformations to streamlines
4================================================
5
6This example shows how to register streamlines into a template space by
7applying non-rigid deformations.
8
9At times we will be interested in bringing a set of streamlines into some
10common, reference space to compute statistics out of the registered
11streamlines. For a discussion on the effects of spatial normalization
approaches on tractography the work by Greene et al. [Greene17]_ can be read.
13
14For brevity, we will include in this example only streamlines going through
15the corpus callosum connecting left to right superior frontal cortex. The
16process of tracking and finding these streamlines is fully demonstrated in
17the :ref:`streamline_tools` example. If this example has been run, we can read
18the streamlines from file. Otherwise, we'll run that example first, by
19importing it. This provides us with all of the variables that were created in
20that example.
21
22In order to get the deformation field, we will first use two b0 volumes. Both
23moving and static images are assumed to be in RAS. The first one will be the
24b0 from the Stanford HARDI dataset:
25
26"""
27
import numpy as np
import nibabel as nib
import os.path as op

# Reuse the bundle produced by the ``streamline_tools`` example if it is
# already on disk; otherwise run that example via a star import, which also
# defines ``hardi_img``, ``data`` and ``gtab`` at module level.
if not op.exists('lr-superiorfrontal.trk'):
    from streamline_tools import *
    # Voxel size along the first axis of the HARDI data (isotropic here).
    vox_size = hardi_img.header.get_zooms()[0]
else:
    from dipy.core.gradients import gradient_table
    from dipy.data import get_fnames
    from dipy.io.gradients import read_bvals_bvecs
    from dipy.io.image import load_nifti_data, load_nifti, save_nifti

    # Download (if needed) and load the Stanford HARDI data and gradients.
    hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi')

    data, affine, hardi_img = load_nifti(hardi_fname, return_img=True)
    vox_size = hardi_img.header.get_zooms()[0]
    bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
    gtab = gradient_table(bvals, bvecs)
47
48"""
49The second one will be the T2-contrast MNI template image, which we'll need to
50reslice to 2x2x2 mm isotropic voxel resolution to match the HARDI data.
51
52"""
53
from dipy.data.fetcher import (fetch_mni_template, read_mni_template)
from dipy.align.reslice import reslice

# Fetch the MNI template and read its T2-contrast ("a") version.
fetch_mni_template()
img_t2_mni = read_mni_template("a", contrast="T2")

# Bring the template onto the 2 mm isotropic grid of the HARDI data.
iso_zooms = (2., 2., 2.)
resliced_data, resliced_affine = reslice(np.asarray(img_t2_mni.dataobj),
                                         img_t2_mni.affine,
                                         img_t2_mni.header.get_zooms(),
                                         iso_zooms)
img_t2_mni = nib.Nifti1Image(resliced_data, affine=resliced_affine)
64
65"""
66We filter the diffusion data from the Stanford HARDI dataset to find the b0
67images.
68
69"""
70
# Indices of the b0 (non-diffusion-weighted) volumes in the HARDI series.
stanford_b0_idx = np.flatnonzero(gtab.b0s_mask)
b0_data_stanford = data[..., stanford_b0_idx]
73
74"""
75We then remove the skull from them:
76
77"""
78
from dipy.segment.mask import median_otsu

# Skull-strip every b0 volume; the binary mask itself is not needed here.
num_b0_vols = b0_data_stanford.shape[-1]
b0_masked_stanford, _ = median_otsu(b0_data_stanford,
                                    vol_idx=list(range(num_b0_vols)),
                                    median_radius=4, numpass=4)
84
85"""
86And go on to compute the Stanford HARDI dataset mean b0 image.
87
88"""
89
# Average the skull-stripped b0 volumes, keeping the original data dtype.
mean_b0_masked_stanford = b0_masked_stanford.mean(axis=3, dtype=data.dtype)
93
94"""
95We will register the mean b0 to the MNI T2 image template non-rigidly to
96obtain the deformation field that will be applied to the streamlines. This is
97just one of the strategies that can be used to obtain an appropriate
98deformation field. Other strategies include computing an FA template map as
99the static image, and registering the FA map of the moving image to it. This
may eventually lead to results with improved accuracy, since a T2-contrast
101template image as the target for normalization does not provide optimal tissue
102contrast for maximal SyN performance.
103
104We will first perform an affine registration to roughly align the two volumes:
105
106"""
107
from dipy.align.imaffine import (AffineRegistration,
                                 MutualInformationMetric,
                                 transform_origins)
from dipy.align.transforms import (AffineTransform3D,
                                   RigidTransform3D,
                                   TranslationTransform3D)

# The template is the fixed (static) volume; the mean b0 is moved onto it.
static = np.asarray(img_t2_mni.dataobj)
static_affine = img_t2_mni.affine
moving = mean_b0_masked_stanford
moving_affine = hardi_img.affine
117
118"""
119We estimate an affine that maps the origin of the moving image to that of the
120static image. We can then use this later to account for the offsets of each
121image.
122
123"""
124
# Affine aligning the origins of the moving and static grids; its offsets
# are reused further below to correct the streamline warp extents.
affine_map = transform_origins(static, static_affine, moving, moving_affine)
126
127"""
128We specify the mismatch metric:
129
130"""
131
# Mutual-information metric: 32-bin joint histogram, full sampling
# (a sampling proportion of None means all voxels are used).
metric = MutualInformationMetric(32, None)
135
136"""
137As well as the optimization strategy:
138
139"""
140
# Three-level Gaussian pyramid; a handful of iterations per level is enough
# for this rough alignment stage.
affine_reg = AffineRegistration(metric=metric,
                                level_iters=[10, 10, 5],
                                sigmas=[3.0, 1.0, 0.0],
                                factors=[4, 2, 1])

params0 = None

# Stage 1: translation only.
transform = TranslationTransform3D()
translation = affine_reg.optimize(static, moving, transform, params0,
                                  static_affine, moving_affine)
transformed = translation.transform(moving)

# Stage 2: rigid (rotation + translation), seeded with the translation.
transform = RigidTransform3D()
rigid_map = affine_reg.optimize(static, moving, transform, params0,
                                static_affine, moving_affine,
                                starting_affine=translation.affine)
transformed = rigid_map.transform(moving)

# Stage 3 uses a full affine; the optimization itself is run below.
transform = AffineTransform3D()
159
160"""
161We bump up the iterations to get a more exact fit:
162
163"""
164
# Many more iterations per pyramid level for an exact full-affine fit.
affine_reg.level_iters = [1000, 1000, 100]
highres_map = affine_reg.optimize(static, moving, transform, params0,
                                  static_affine, moving_affine,
                                  starting_affine=rigid_map.affine)
transformed = highres_map.transform(moving)
170
171
172"""
173We now perform the non-rigid deformation using the Symmetric Diffeomorphic
174Registration (SyN) Algorithm proposed by Avants et al. [Avants09]_ (also
175implemented in the ANTs software [Avants11]_):
176
177"""
178
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
from dipy.align.metrics import CCMetric

# Cross-correlation similarity metric over 3D volumes drives SyN.
metric = CCMetric(3)
level_iters = [10, 10, 5]
sdr = SymmetricDiffeomorphicRegistration(metric, level_iters)

# Seed SyN with the affine result so it only recovers the non-rigid part.
mapping = sdr.optimize(static, moving, static_affine, moving_affine,
                       highres_map.affine)
warped_moving = mapping.transform(moving)
189
190"""
191We show the registration result with:
192
193"""
194
from dipy.viz import regtools

# One overlay per anatomical plane: axis 0=sagittal, 1=coronal, 2=axial.
for axis_idx, plane in enumerate(('sagittal', 'coronal', 'axial')):
    regtools.overlay_slices(static, warped_moving, None, axis_idx,
                            'Static', 'Moving',
                            'transformed_%s.png' % plane)
203
204"""
205.. figure:: transformed_sagittal.png
206   :align: center
207.. figure:: transformed_coronal.png
208   :align: center
209.. figure:: transformed_axial.png
210   :align: center
211
212   Deformable registration result.
213
214"""
215
216"""
217We read the streamlines from file in voxel space:
218
219"""
220
from dipy.io.streamline import load_tractogram

# 'same' loads the streamlines in the reference space recorded in the .trk
# file header (the space they were saved with in streamline_tools).
sft = load_tractogram('lr-superiorfrontal.trk', 'same')
224
225
226"""
227We then apply the obtained deformation field to the streamlines. Note that the
228process can be sensitive to image orientation and voxel resolution. Thus, we
229first apply the non-rigid warping and simultaneously apply a computed rigid
230affine transformation whose extents must be corrected to account for the
231different voxel grids of the moving and static images.
232
233"""
234
from dipy.tracking.streamline import deform_streamlines

# Isocentered affine at the HARDI voxel size; the negative x zoom produces
# the mirror image used below.
target_isocenter = np.diag([-vox_size, vox_size, vox_size, 1.])

# Off-origin affine capturing the extent contrast between the mean b0 image
# and the template.
origin_affine = affine_map.affine.copy()
243
244"""
245In order to align the FOV of the template and the mirror image of the
246streamlines, we first need to flip the sign on the x-offset and y-offset so
247that we get the mirror image of the forward deformation field.
248
We need to use the information about the origin offsets (i.e. between the
static and moving images) that we obtained using :func:`transform_origins`:
251
252"""
253
# Negate the x- and y-offsets in one shot to mirror the forward
# deformation field's field of view.
origin_affine[:2, 3] *= -1
256
257"""
:func:`transform_origins` returns this affine transformation with (1, 1, 1)
zooms and not (2, 2, 2), which means that the offsets need to be scaled down
by the voxel size. Thus, we divide the z-offset by the voxel size:
261
262"""
263
# Bring the z-offset from (1, 1, 1)-zoom units to the 2 mm voxel grid.
origin_affine[2, 3] /= vox_size
265
266"""
But when scaling the z-offset, we are also implicitly scaling the y-offset as
well (by 1/2). Thus, we need to correct for this by scaling the y-offset by
the square of the voxel size (1/4, and not 1/2):
270
271"""
272
# Compensate for the implicit y scaling introduced by the z correction.
origin_affine[1, 3] /= vox_size ** 2

# Warp the streamlines with the forward field, correcting for the extents.
mni_streamlines = deform_streamlines(
    sft.streamlines,
    deform_field=mapping.get_forward_field(),
    stream_to_current_grid=target_isocenter,
    current_grid_to_world=origin_affine,
    stream_to_ref_grid=target_isocenter,
    ref_grid_to_world=np.eye(4))
281
282"""
283We display the original streamlines and the registered streamlines:
284
285"""
286
287from dipy.viz import has_fury
288
289
def show_template_bundles(bundles, show=True, fname=None):
    """Render *bundles* on top of the module-level ``static`` template.

    Parameters
    ----------
    bundles : Streamlines or sequence of ndarrays
        Streamlines to display, in the template's world coordinates.
    show : bool, optional
        Whether to open an interactive rendering window.
    fname : str or None, optional
        If given, record the scene to this image file (900 x 900 pixels).
    """
    # Import locally: the module-level ``from fury import actor, window``
    # only runs inside ``if has_fury:`` *after* this definition, so relying
    # on those globals makes the function crash when called any other way.
    from fury import actor, window

    scene = window.Scene()
    # Template volume as a background slice.
    scene.add(actor.slicer(static))

    # NOTE(review): ``window.colors`` is not exposed by recent fury releases
    # (colors live in ``fury.colors``) — confirm against the pinned version.
    lines_actor = actor.streamtube(bundles, window.colors.orange,
                                   linewidth=0.3)
    scene.add(lines_actor)

    if show:
        window.show(scene)
    if fname is not None:
        window.record(scene, n_frames=1, out_path=fname, size=(900, 900))
304
305
# Only render when fury is installed; save a snapshot without opening an
# interactive window.
if has_fury:

    from fury import actor, window

    show_template_bundles(mni_streamlines, show=False,
                          fname='streamlines_DSN_MNI.png')

    """
    .. figure:: streamlines_DSN_MNI.png
       :align: center

       Streamlines before and after registration.

    The corpus callosum bundles have been deformed to adapt to the MNI
    template space.

    """
323
324"""
325Finally, we save the registered streamlines:
326
327"""
328
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.streamline import save_tractogram

# Anchor the warped streamlines to the MNI template image in RAS+ mm space.
sft = StatefulTractogram(mni_streamlines, img_t2_mni, Space.RASMM)

# bbox_valid_check is disabled — presumably because warped points can land
# slightly outside the template grid; TODO confirm.
save_tractogram(sft, 'mni-lr-superiorfrontal.trk', bbox_valid_check=False)
335
336
337"""
338References
339----------
340
341.. [Avants09] Avants, B. B., Epstein, C. L., Grossman, M., & Gee, J. C. (2009).
342   Symmetric Diffeomorphic Image Registration with Cross-Correlation:
343   Evaluating Automated Labeling of Elderly and Neurodegenerative Brain, 12(1),
344   26-41.
345
346.. [Avants11] Avants, B. B., Tustison, N., & Song, G. (2011). Advanced
347   Normalization Tools (ANTS), 1-35.
348
349.. [Greene17] Greene, C., Cieslak, M., & Grafton, S. T. (2017). Effect of
350   different spatial normalization approaches on tractography and structural
351   brain networks. Network Neuroscience, 1-19.
352
353.. include:: ../links_names.inc
354
355"""
356