/******************* DWIstructtensor.c *************************************/
/* Author: Daniel Glen, 13 Jun 2005                                         */
/* Compute the structure tensor of DWI volumes for the purpose of           */
/* anisotropic smoothing of the image.                                      */
/* Called as a function to generate the D tensor, which can be a 2D or 3D   */
/* tensor. The D tensor is then used as the basis for smoothing along the   */
/* directionality of the DWI image.                                         */
/* This tensor is not the same as the traditional diffusion tensor of DTI   */
/* imaging; it is a diffusion tensor describing diffusion within the image  */
/* itself - the movement of intensity within the image.                     */
/* Only one D tensor volume is generated for all of the DWI volumes.        */
/* The 2D form has three elements in tensor form for each voxel, or a 2x2   */
/* matrix in matrix form.                                                   */
/* The 3D form has six elements in tensor form for each voxel, or 3x3 in    */
/* matrix form.                                                             */
/* This program is appropriate as a function within another main program    */
/* used to generate the tensor. The calling function can then use the       */
/* tensor to smooth the image.                                              */


#ifdef __GNUC__
/* inline is used to make macro-equivalent-speed functions, */
/* but it is only available for gcc */
#define INLINE inline
#else
#define INLINE /**/
#endif

#include "thd_shear3d.h"
#include "matrix.h"
#include "mrilib.h"

#define TINYNUMBER 1E-10
#define SMALLNUMBER 1E-4

static char D_prefix[THD_MAX_PREFIX] = "TempDAni";

float aniso_sigma1 = 0.5;
float aniso_sigma2 = 1.0;


THD_3dim_dataset *DWIstructtensor(THD_3dim_dataset *DWI_dset, int flag2D3D,
                                  byte *maskptr, int smooth_flag,
                                  int save_tempdsets_flag, float *cen);
void Smooth_DWI_dset(THD_3dim_dataset *DWI_dset, int flag2D3D);
void Smooth_Gradient_Matrix(MRI_IMARR *Gradient_Im, int flag2D3D);
MRI_IMARR *Compute_Gradient_Matrix(THD_3dim_dataset *DWI_dset, int flag2D3D,
                                   byte *maskptr, int prodflag,
                                   int smooth_flag, float smooth_factor);
MRI_IMARR *Compute_Gradient_Matrix_Im(MRI_IMAGE *SourceIm, int flag2D3D,
                                      byte *maskptr,
                                      int xflag, int yflag, int zflag);
MRI_IMARR *Eig_Gradient(MRI_IMARR *Gradient_Im, int flag2D3D, byte *maskptr);
MRI_IMARR *Compute_Phi(MRI_IMARR *EV_Im, int flag2D3D, byte *maskptr);
MRI_IMARR *ComputeDTensor(MRI_IMARR *phi_Im, int flag2D3D, byte *maskptr);
THD_3dim_dataset *Copy_IMARR_to_dset(THD_3dim_dataset *base_dset,
                                     MRI_IMARR *Imptr, char *new_prefix);
static INLINE float vox_val(int x, int y, int z, float *imptr,
                            int nx, int ny, int nz, byte *maskptr,
                            int i, int j, int k);
extern THD_3dim_dataset *Copy_dset_to_float(THD_3dim_dataset *dset,
                                            char *new_prefix);
void Compute_IMARR_Max(MRI_IMARR *Imptr);
float Find_Max_Im(MRI_IMAGE *im, byte *maskptr);
void Save_imarr_to_dset(MRI_IMARR *Imarr_Im, THD_3dim_dataset *base_dset,
                        char *dset_name);

extern int compute_method; /* determines which method to compute phi */

static int with_diffmeasures = 0;
void set_with_diff_measures(int v) { with_diffmeasures = v; }
int get_with_diff_measures(void) { return(with_diffmeasures); }
THD_3dim_dataset *Compute_DiffMeasures(MRI_IMARR *EV_Im, int flag2D3D,
                                       byte *maskptr,
                                       THD_3dim_dataset *grid_dset,
                                       float *cen);
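
/* A minimal usage sketch (illustrative only; "my_dset" and "mask" are
   hypothetical names, not part of this file): a caller typically converts
   its dataset to float, optionally builds a byte mask, and asks for the
   3D structure tensor, e.g.

      THD_3dim_dataset *fset, *Dtensor;
      byte *mask = NULL;                          // NULL means use every voxel
      fset    = Copy_dset_to_float(my_dset, "tempfloat");
      Dtensor = DWIstructtensor(fset, 3, mask, 1, 0, NULL);
      // ...smooth along the tensor directions, then...
      THD_delete_3dim_dataset(Dtensor, False);
*/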

/*! compute image diffusion tensor, D, for anisotropic smoothing of DWI.
    If (save_tempdsets_flag), then temporary datasets are saved
    with a suffix of .DD, where DD is the iteration number,
    which is save_tempdsets_flag-1
*/
THD_3dim_dataset *
DWIstructtensor(THD_3dim_dataset *DWI_dset, int flag2D3D, byte *maskptr,
                int smooth_flag, int save_tempdsets_flag, float *cen)
{
   MRI_IMARR *Gradient_Im, *EV_Im, *phi_Im, *D_Im;
   THD_3dim_dataset *D_dset;
   char obuff[128]={""};

   ENTRY("DWIstructtensor");

   /*if(smooth_flag)
       Smooth_DWI_dset(tempdset,flag2D3D);*/ /* smooth DWI images a little
                                                with Gaussian smoothing */
   /* compute gradients of smoothed DWI images */
   /* and form matrix of gradients - imarr with 3 sub-briks for 2D */
   Gradient_Im = Compute_Gradient_Matrix(DWI_dset, flag2D3D, maskptr,
                                         1, smooth_flag, aniso_sigma1);
   /* THD_delete_3dim_dataset(tempdset , False ) ;*/ /* delete temporary copy */
   if(save_tempdsets_flag) {
      snprintf(obuff, 127, "Gradient.%02d", save_tempdsets_flag-1);
      Save_imarr_to_dset(Gradient_Im, DWI_dset, obuff);
   }
   /* smooth each component of gradient matrix more */
   if(smooth_flag)
      Smooth_Gradient_Matrix(Gradient_Im, flag2D3D);

   /* compute eigenvalues, eigenvectors of smoothed gradient matrix */
   /* imarr with 6 sub-briks for 2D (extends the Gradient_Im from 3 to 6) */
   EV_Im = Eig_Gradient(Gradient_Im, flag2D3D, maskptr);

   if(save_tempdsets_flag) {
      snprintf(obuff, 127, "Eigens.%02d", save_tempdsets_flag-1);
      Save_imarr_to_dset(EV_Im, DWI_dset, obuff);
   }

   if (save_tempdsets_flag && with_diffmeasures) {
      THD_3dim_dataset *dout=NULL;
      if (!(dout = Compute_DiffMeasures(EV_Im, flag2D3D, maskptr,
                                        DWI_dset, cen))) {
         ERROR_message("Failed to compute diffusion measures");
      } else {
         snprintf(obuff, 127, "Diff_measures.%02d", save_tempdsets_flag-1);
         EDIT_dset_items(dout, ADN_prefix, obuff, ADN_none);
         DSET_overwrite(dout);
         INFO_message(" Output dataset %s", DSET_BRIKNAME(dout));
         THD_delete_3dim_dataset( dout, False ) ;  /* delete temporary dset,
                                                      including DiffMeas_Im */
      }
   }

   /*Compute_IMARR_Max(EV_Im);*/
   /* compute phi (a kind of reciprocal of the eigenvalues) */
   /* replaces the first two eigenvalue sub-briks with phi_Im */
   phi_Im = Compute_Phi(EV_Im, flag2D3D, maskptr);
   if(save_tempdsets_flag) {
      snprintf(obuff, 127, "phi.%02d", save_tempdsets_flag-1);
      Save_imarr_to_dset(phi_Im, DWI_dset, obuff);
   }
   /*printf("computed phi_Im\n");*/
   /*Compute_IMARR_Max(phi_Im);*/

   /* compute D, diffusion tensor of structure of DWI */
   /* replaces the first 3 sub-briks for 2D with Dxx, Dxy, Dyy */
   D_Im = ComputeDTensor(phi_Im, flag2D3D, maskptr);
   /* do not have to free any temporary image arrays */
   /* DESTROY_IMARR(phi_Im);*/
   /* DESTROY_IMARR(EV_Im); */
   /* for 2D, keep the first three sub-briks and remove the rest */
   if(flag2D3D==2) {
      TRUNCATE_IMARR(D_Im,3);
      D_dset = Copy_IMARR_to_dset(DWI_dset, D_Im, D_prefix);
      tross_Copy_History (DWI_dset, D_dset);
      EDIT_dset_items (D_dset, ADN_brick_label_one + 0, "Dxx", ADN_none);
      EDIT_dset_items (D_dset, ADN_brick_label_one + 1, "Dxy", ADN_none);
      EDIT_dset_items (D_dset, ADN_brick_label_one + 2, "Dyy", ADN_none);
   }
   else {
      TRUNCATE_IMARR(D_Im,6);
      D_dset = Copy_IMARR_to_dset(DWI_dset, D_Im, D_prefix);
      tross_Copy_History (DWI_dset, D_dset);
      EDIT_dset_items (D_dset, ADN_brick_label_one + 0, "Dxx", ADN_none);
      EDIT_dset_items (D_dset, ADN_brick_label_one + 1, "Dxy", ADN_none);
      EDIT_dset_items (D_dset, ADN_brick_label_one + 2, "Dxz", ADN_none);
      EDIT_dset_items (D_dset, ADN_brick_label_one + 3, "Dyy", ADN_none);
      EDIT_dset_items (D_dset, ADN_brick_label_one + 4, "Dyz", ADN_none);
      EDIT_dset_items (D_dset, ADN_brick_label_one + 5, "Dzz", ADN_none);
   }

   EDIT_dset_items( D_dset ,
                    ADN_malloc_type , DATABLOCK_MEM_MALLOC , /* store in memory */
                    ADN_prefix , "Dtensor",
                    ADN_label1 , "Dtensor" ,
                    ADN_none ) ;
   /* return D - diffusion tensor of image */
   RETURN(D_dset);
}

/*! save IMARR structure to temporary dataset and write to disk */
void
Save_imarr_to_dset(MRI_IMARR *Imarr_Im, THD_3dim_dataset *base_dset,
                   char *dset_name)
{
   THD_3dim_dataset *temp_dset;
   int nbriks, i;
   char tempstring[256];

   ENTRY("Save_imarr_dset");
   temp_dset = Copy_IMARR_to_dset(base_dset, Imarr_Im, dset_name);
   nbriks = temp_dset->dblk->nvals;
   tross_Copy_History (base_dset, temp_dset);
   if (!strncmp(dset_name, "Eigens.", 7) && (nbriks == 12 || nbriks == 8)) {
      if (nbriks == 12) { /* More informative labels; check whether directions
                             are RAI or IJK... */
         for(i=0;i<3;i++) {
            sprintf(tempstring,"L%d", i+1);
            EDIT_dset_items(temp_dset,
                            ADN_brick_label_one + i, tempstring, ADN_none);
         }
         for(i=3;i<12;i=i+3) {
            sprintf(tempstring,"V%d.x", i/3);
            EDIT_dset_items(temp_dset,
                            ADN_brick_label_one + i, tempstring, ADN_none);
            sprintf(tempstring,"V%d.y", i/3);
            EDIT_dset_items(temp_dset,
                            ADN_brick_label_one + i+1, tempstring, ADN_none);
            sprintf(tempstring,"V%d.z", i/3);
            EDIT_dset_items(temp_dset,
                            ADN_brick_label_one + i+2, tempstring, ADN_none);
         }
      } else if (nbriks == 8) {
         for(i=0;i<2;i++) {
            sprintf(tempstring,"L%d", i+1);
            EDIT_dset_items(temp_dset,
                            ADN_brick_label_one + i, tempstring, ADN_none);
         }
         for(i=2;i<8;i=i+2) {
            sprintf(tempstring,"V%d.x", i/2);
            EDIT_dset_items(temp_dset,
                            ADN_brick_label_one + i, tempstring, ADN_none);
            sprintf(tempstring,"V%d.y", i/2);
            EDIT_dset_items(temp_dset,
                            ADN_brick_label_one + i+1, tempstring, ADN_none);
         }
      } else { /* Should not be here! */
         for(i=0;i<nbriks;i++) {
            sprintf(tempstring,"%s_%d", dset_name, i);
            EDIT_dset_items(temp_dset,
                            ADN_brick_label_one + i, tempstring, ADN_none);
         }
      }
   } else { /* default */
      for(i=0;i<nbriks;i++) {
         sprintf(tempstring,"%s_%d", dset_name, i);
         EDIT_dset_items(temp_dset, ADN_brick_label_one + i, tempstring, ADN_none);
      }
   }

   EDIT_dset_items(temp_dset ,
                   ADN_malloc_type , DATABLOCK_MEM_MALLOC , /* store in memory */
                   ADN_prefix , dset_name,
                   ADN_label1 , dset_name ,
                   ADN_none ) ;
   DSET_overwrite (temp_dset);
   INFO_message(" Output dataset %s", DSET_BRIKNAME(temp_dset));
   temp_dset->dblk->brick = NULL;   /* don't delete MRI_IMARR structure */
   THD_delete_3dim_dataset( temp_dset, False ) ;  /* delete temporary dset */
                                                  /* from memory (not disk) */

   EXRETURN;
}
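
/* Illustrative call (hypothetical names): write a gradient image array
   out for inspection on the grid of dataset "dset" as "Gradient.00":

      Save_imarr_to_dset(Gradient_Im, dset, "Gradient.00");

   The base dataset supplies the grid and header; the IMARR supplies the
   sub-brik data, which is why the datablock's brick pointer is set to
   NULL above before the temporary dataset is deleted. */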

/* apply a small amount of smoothing to the data */
void
Smooth_DWI_dset(THD_3dim_dataset *DWI_dset, int flag2D3D)
{
   float *ar;
   MRI_IMAGE *data_im = NULL;
   int nx, ny, nz, fim_type, i;
   THD_dataxes *daxes;
   float dz;

   ENTRY("Smooth_DWI_dset");

   /** load the grid parameters **/
   daxes = DWI_dset->daxes ;
   nx = daxes->nxx ;
   ny = daxes->nyy ;
   nz = daxes->nzz ;

   fim_type = MRI_float ;   /* only works with floats here */

   if(flag2D3D == 2)   /* for 2D, don't smooth in the Z direction */
      dz = 0.0f;
   else
      dz = 1.0f;
   /* smooth DWI images a little with Gaussian smoothing */
   for(i=0;i<DWI_dset->dblk->nvals; i++) {   /* for each sub-brik in dataset */
      data_im = DSET_BRICK (DWI_dset, i);   /* pointer to the ith sub-brik of the dataset */
      ar = (float *) mri_data_pointer(data_im) ;
      EDIT_blur_volume( nx,ny,nz, 1.0f,1.0f,dz, fim_type, ar, 0.5f ) ;
   }
   EXRETURN;
}

/* apply a small amount of smoothing to the data */
void
Smooth_Gradient_Matrix(MRI_IMARR *Gradient_Im, int flag2D3D)
{
   float *ar;
   MRI_IMAGE *data_im = NULL;
   int nx, ny, nz, fim_type, i;
   float dz;

   ENTRY("Smooth_Gradient_Matrix");

   /** load the grid parameters **/

   fim_type = MRI_float ;   /* only works with floats here */

   /* smooth gradient images a little with Gaussian smoothing */
   for(i=0;i<Gradient_Im->num; i++) {   /* for each sub-brik in dataset */
      data_im = Gradient_Im->imarr[i];  /* pointer to the ith sub-brik of the dataset */
      ar = (float *) mri_data_pointer(data_im) ;
      nx = data_im->nx; ny = data_im->ny; nz = data_im->nz;
      if(flag2D3D == 2)   /* for 2D, don't smooth in the Z direction */
         dz = 0.0f;
      else
         dz = 1.0f;

      EDIT_blur_volume( nx,ny,nz, 1.0f,1.0f,dz, fim_type, ar, aniso_sigma2 ) ;
   }
   EXRETURN;
}

/****************************************************************************************/
/* old unoptimized code */
/* compute numerical gradients for each voxel and compose matrix for smoothing,
   including [(du/dx)^2  du/dx*du/dy  (du/dy)^2] */
MRI_IMARR *
Compute_Gradient_Matrix_old(THD_3dim_dataset *DWI_dset, int flag2D3D, byte *maskptr, int prodflag)
{
   /* DWI_dset is the input dataset */
   /* flag2D3D is a flag for the dimensionality of the gradient */
   /* maskptr is a pointer to a mask array to mask the data - NULL if no mask */
   /* prodflag is the product flag: whether to simply compute du/dx and du/dy,
      or du/dx^2, du/dx*du/dy, du/dy^2 */
   /* the gradient matrix is returned as an MRI_IMARR (2 or 3 sub-briks for the 2D case) */

   /* edge points and masked points are treated equivalently, */
   /* with a test for the index of each node in the kernels and
      the central voxels themselves */

   /* du/dx is calculated with a 3x3 kernel for 2D as */
   /*   -a 0 a       v0 0 v3  */
   /*   -b 0 b       v1 0 v4  */
   /*   -a 0 a       v2 0 v5  */
   /* where a=3/16, b=10/16 */

   /* du/dy is calculated with a 3x3 kernel for 2D as */
   /*    c  d  c     v0 v1 v2 */
   /*    0  0  0      0  0  0 */
   /*   -c -d -c     v3 v4 v5 */
   /* where c=3/16, d=10/16 */

   /* for 3D, instead of alternating rows and columns, */
   /* use alternating planes in direction (p+1) - (p-1) for du/dx */
   /*   a b a     a b a      r+1 */
   /*   b c b  -  b c b      r   */
   /*   a b a     a b a      r-1 */
   /*       q-1 q q+1            */
   /* where a = 0.02, b = 0.06, c = 0.18 */
   /* two vertical planes before and after the current voxel for du/dx */
   /* two horizontal planes above and below the current voxel for du/dy */
   /* two slices before and after the current voxel for du/dz */
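
/* Worked example of the 2D du/dx kernel above (illustrative numbers):
   take a 3x3 neighborhood whose columns are u = 1, 2, 3, i.e. a
   left-to-right ramp with unit slope. Then

      du/dx = a*(v3+v5-v0-v2) + b*(v4-v1)
            = (3/16)*(3+3-1-1) + (10/16)*(3-1)
            = 12/16 + 20/16 = 2

   The positive weights sum to 2a+b = 1 and the two columns are two
   voxels apart, so the estimate is twice the per-voxel slope; the
   commented-out (2.0 * dx) factors below are where that spacing
   would be divided out. */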

   MRI_IMARR *Gradient_Im;
   MRI_IMAGE *im, *data_im;
   byte *tempmaskptr;

   float *ar, *gptr[6];
   static double a, b, c, d;
   double dudx, dudy, dudz;
   float v0, v1, v2, v3, v4, v5, tempv, temp;
   float vv[3][3][3];   /* voxel values for cubic stencil */
   int nx, ny, nz, i, j, k, l, ii, nbriks, nout, ll, kk, jj;
   THD_dataxes *daxes;
   /*float dx = 1.0;*/   /* delta x - assume cubical voxels for now */

   ENTRY("Compute_Gradient_Matrix_old");

   tempmaskptr = maskptr;
   /* set up constants for kernel */
   if(flag2D3D==2) {
      a = 0.1875;  /* (2.0 * dx); */  /* 3/16 */
      b = 0.625;   /* (2.0 * dx); */  /* 10/16 */
      c = 0.1875;
      d = 0.625;

      if(prodflag)
         nout = 3;
      else
         nout = 2;
   }
   else {
      a = 0.02;
      b = 0.06;
      c = 0.18;
      if(prodflag)
         nout = 6;
      else
         nout = 3;
   }

   /** load the grid parameters **/
   daxes = DWI_dset->daxes ;
   nx = daxes->nxx ;
   ny = daxes->nyy ;
   nz = daxes->nzz ;
   nbriks = DWI_dset->dblk->nvals;
   /* make a new image array to hold the gradients and then the gradient products */
   INIT_IMARR(Gradient_Im);
   for(i=0;i<nout; i++) {   /* create sub-briks for du/dx^2, du/dx*du/dy and du/dy^2 */
      im = mri_new_vol(nx, ny, nz, MRI_float);
      if(im==NULL) {
         ERROR_message("can not create temporary data storage");
         RETURN(NULL);
      }
      ADDTO_IMARR(Gradient_Im, im);
   }


   for(ii=0;ii<nout;ii++) {
      im = (Gradient_Im->imarr[ii]);
      gptr[ii] = (float *) mri_data_pointer(im);
      if(gptr[ii]==NULL) {
         ERROR_message("can not create temporary data storage pointers");
         RETURN(NULL);
      }
   }

   for(j=0;j<nz;j++) {         /* for each slice in sub-brik */
      for(k=0;k<ny;k++) {      /* for each row */
         for(l=0;l<nx;l++) {   /* for each column */
            for(ii=0;ii<nout;ii++)
               *gptr[ii] = 0.0;   /* initialize each summed gradient product component in the output briks */

            if((maskptr!=NULL) && (!*tempmaskptr++)) {   /* check if point is in mask or not */
               for(ii=0;ii<nout;ii++)
                  gptr[ii]++;
            }
            else {
               for(i=0;i<nbriks; i++) {   /* for each sub-brik in dataset */
                  data_im = DSET_BRICK (DWI_dset, i);   /* pointer to the ith sub-brik of the dataset */
                  ar = (float *) mri_data_pointer(data_im) ;

                  if(flag2D3D==2) {
                     /* column before voxel */
                     /* voxel_value(col-1, row-1) */
                     v0 = vox_val(l-1,k-1,j,ar,nx,ny,nz,maskptr,l,k,j);
                     /* voxel_value(col-1, row) */
                     v1 = vox_val(l-1,k,j,ar,nx,ny,nz,maskptr,l,k,j);
                     /* voxel_value(col-1, row+1) */
                     v2 = vox_val(l-1,k+1,j,ar,nx,ny,nz,maskptr,l,k,j);

                     /* column after voxel */
                     /* voxel_value(col+1, row-1) */
                     v3 = vox_val(l+1,k-1,j,ar,nx,ny,nz,maskptr,l,k,j);
                     /* voxel_value(col+1, row) */
                     v4 = vox_val(l+1,k,j,ar,nx,ny,nz,maskptr,l,k,j);
                     /* voxel_value(col+1, row+1) */
                     v5 = vox_val(l+1,k+1,j,ar,nx,ny,nz,maskptr,l,k,j);
                     dudx = a*(v3+v5-v0-v2) + b*(v4-v1);

                     /* row before voxel */
                     /* voxel_value(col-1, row-1): v0 stays the same */
                     /* voxel_value(col, row-1) */
                     v1 = vox_val(l,k-1,j,ar,nx,ny,nz,maskptr,l,k,j);
                     tempv = v3;   /* swap v2,v3 for du/dy */
                     v3 = v2;      /* previously found, use for lower left corner of kernel */
                     v2 = tempv;   /* vox_val(l+1,k-1,j,ar,nx,ny,nz,maskptr,l,k,j); */

                     /* row after voxel */
                     /* voxel_value(col+1, row-1) defined above */
                     /* v3 = VOX_VAL(l+1,k-1,sliceoffsetptr,nx,ny); */
                     /* voxel_value(col, row+1) */
                     v4 = vox_val(l,k+1,j,ar,nx,ny,nz,maskptr,l,k,j);
                     /* voxel_value(col+1, row+1): v5 stays the same */
                     dudy = c*(v3+v5-v0-v2) + d*(v4-v1);
                     if(prodflag) {
                        *(gptr[0]) += dudx * dudx;   /* sum gradient product components in output image array */
                        *(gptr[1]) += dudx * dudy;
                        *(gptr[2]) += dudy * dudy;
                     }
                     else {
                        *(gptr[0]) += dudx;   /* sum gradient components in output image array */
                        *(gptr[1]) += dudy;
                     }
                  } /* end of 2D gradient */
                  else {   /* this is 3D */

                     /* build 27 point stencil (0,0,0) to (2,2,2) */
                     /* don't actually need to get central point (1,1,1) */
                     for(ll=0;ll<3;ll++) {
                        for(kk=0;kk<3;kk++) {
                           for(jj=0;jj<3;jj++) {
                              vv[ll][kk][jj] = vox_val(l-1+ll, k-1+kk, j-1+jj, ar, nx, ny, nz, maskptr, l, k, j);
                           }
                        }
                     }

                     /* du/dx across alternating planes left and right of current voxel */
                     /* a-terms: corners of cube; b-terms: centers of edges of cube */
                     dudx = a * ( vv[2][0][0] + vv[2][0][2] + vv[2][2][0] + vv[2][2][2] -
                                  vv[0][0][0] - vv[0][0][2] - vv[0][2][0] - vv[0][2][2]) +
                            b * ( vv[2][0][1] + vv[2][1][0] + vv[2][1][2] + vv[2][2][1] -
                                  vv[0][0][1] - vv[0][1][0] - vv[0][1][2] - vv[0][2][1]) +
                            c * ( vv[2][1][1] - vv[0][1][1]);   /* centers of square faces of cube */

                     /* du/dy across alternating planes above and below current voxel */
                     dudy = a * ( vv[0][2][0] + vv[2][2][0] + vv[0][2][2] + vv[2][2][2] -
                                  vv[0][0][0] - vv[2][0][0] - vv[0][0][2] - vv[2][0][2]) +
                            b * ( vv[1][2][0] + vv[0][2][1] + vv[2][2][1] + vv[1][2][2] -
                                  vv[1][0][0] - vv[0][0][1] - vv[2][0][1] - vv[1][0][2]) +
                            c * ( vv[1][2][1] - vv[1][0][1]);   /* centers of square faces of cube */

                     /* du/dz across alternating slices before and after current voxel */
                     dudz = a * ( vv[0][0][2] + vv[2][0][2] + vv[0][2][2] + vv[2][2][2] -
                                  vv[0][0][0] - vv[2][0][0] - vv[0][2][0] - vv[2][2][0]) +
                            b * ( vv[1][0][2] + vv[0][1][2] + vv[2][1][2] + vv[1][2][2] -
                                  vv[1][0][0] - vv[0][1][0] - vv[2][1][0] - vv[1][2][0]) +
                            c * ( vv[1][1][2] - vv[1][1][0]);   /* centers of square faces of cube */

                     if(prodflag) {
                        *(gptr[0]) += dudx * dudx;   /* sum gradient product components in output image array */
                        *(gptr[1]) += dudx * dudy;
                        *(gptr[2]) += dudx * dudz;
                        *(gptr[3]) += dudy * dudy;
                        *(gptr[4]) += dudy * dudz;
                        *(gptr[5]) += dudz * dudz;
                     }
                     else {
                        *(gptr[0]) += dudx;   /* sum gradient components in output image array */
                        *(gptr[1]) += dudy;
                        *(gptr[2]) += dudz;
                     }

                  } /* end of 3D gradient */

               } /* sum over all sub-briks */

               for(ii=0;ii<nout;ii++) {
                  *gptr[ii] = *gptr[ii] / nbriks;   /* normalize gradient for number of briks */
                  temp = fabs(*gptr[ii]);
                  if(temp<TINYNUMBER)
                     *gptr[ii] = 0.0;
                  gptr[ii]++;   /* and increment pointers */
               }
            } /* not masked point */
         }
      }
   }

   RETURN(Gradient_Im);
}


/* new optimized code trial */
/**********************************************************************************************************/
/* compute numerical gradients for each voxel and compose matrix for smoothing,
   including [(du/dx)^2  du/dx*du/dy  (du/dy)^2] */
MRI_IMARR *
Compute_Gradient_Matrix(THD_3dim_dataset *DWI_dset, int flag2D3D, byte *maskptr,
                        int prodflag, int smoothflag, float smooth_factor)
{
   /* DWI_dset is the input dataset */
   /* flag2D3D is a flag for the dimensionality of the gradient */
   /* maskptr is a pointer to a mask array to mask the data - NULL if no mask */
   /* prodflag is the product flag: whether to simply compute du/dx and du/dy,
      or du/dx^2, du/dx*du/dy, du/dy^2 */
   /* the gradient matrix is returned as an MRI_IMARR (2 or 3 sub-briks for the 2D case) */

   /* edge points and masked points are treated equivalently, */
   /* with a test for the index of each node in the kernels and
      the central voxels themselves */

   /* du/dx is calculated with a 3x3 kernel for 2D as */
   /*   -a 0 a       v0 v1 v2 */
   /*   -b 0 b       v3 v4 v5 */
   /*   -a 0 a       v6 v7 v8 */
   /* where a=3/16, b=10/16 */

   /* du/dy is calculated with a 3x3 kernel for 2D as */
   /*    c  d  c     v0 v1 v2 */
   /*    0  0  0     v3 v4 v5 */
   /*   -c -d -c     v6 v7 v8 */
   /* where c=3/16, d=10/16 */

   /* for 3D, instead of alternating rows and columns, */
   /* use alternating planes in direction (p+1) - (p-1) for du/dx */
   /*   a b a     a b a      r+1 */
   /*   b c b  -  b c b      r   */
   /*   a b a     a b a      r-1 */
   /*       q-1 q q+1            */
   /* where a = 0.02, b = 0.06, c = 0.18 */
   /* two vertical planes before and after the current voxel for du/dx */
   /* two horizontal planes above and below the current voxel for du/dy */
   /* two slices before and after the current voxel for du/dz */

   MRI_IMARR *Gradient_Im;
   MRI_IMAGE *im, *data_im;
   byte *tempmaskptr;

   float *ar, *gptr[6];
   static double a, b, c, d;
   double dudx, dudy, dudz, dv0, dv1=0.0, dv2=0.0;
   float v0=0.0, v1=0.0, v2=0.0, v3, v4=0.0, v5=0.0, v6=0.0, v7=0.0, v8=0.0, temp;
   int nx, ny, nz, nxy, nxyz, i, ii, nbriks, nout;
   int vx, vy, vz, vi, baseoffset, yp1xp1, yp1xm1, nxp1, nxm1, nym1, nzm1;
   float *blur_data = NULL;
   float *vptr, *vptr0, *vptr1, *vptr2, *vptr3, *vptr5, *vptr6, *vptr7, *vptr8;
   float dz;
   int maskflag;

   float v9, v10=0.0, v11=0.0, v12, v13=0.0, v14=0.0, v15, v16=0.0, v17=0.0, v18=0.0;
   float v19=0.0, v20=0.0, v21, v22=0.0, v23=0.0, v24=0.0, v25=0.0, v26=0.0;
   float dv0600, dv0701=0.0, dv0802=0.0, dv1509, dv1610=0.0, dv1711=0.0, dv2418, dv2519=0.0, dv2620=0.0;
   float sv1824, sv1925=0.0, sv2026=0.0, sv0006, sv0107=0.0, sv0208=0.0, dv2103, dv2204=0.0, dv2305;
   /*float dx = 1.0;*/   /* delta x - assume cubical voxels for now */

   ENTRY("Compute_Gradient_Matrix");

   /* test with old code here - remove */
   /*Gradient_Im = Compute_Gradient_Matrix_v1(DWI_dset, flag2D3D, maskptr, prodflag);*/
   /* RETURN(Gradient_Im);*/
   /*****************************************/

   tempmaskptr = maskptr;
   /* set up constants for kernel */
   if(flag2D3D==2) {
      a = 0.1875;  /* (2.0 * dx); */  /* 3/16 */
      b = 0.625;   /* (2.0 * dx); */  /* 10/16 */
      c = 0.1875;
      d = 0.625;

      if(prodflag)
         nout = 3;
      else
         nout = 2;
   }
   else {
      a = 0.02;
      b = 0.06;
      c = 0.18;
      if(prodflag)
         nout = 6;
      else
         nout = 3;
   }

   /** load the grid parameters **/
   data_im = DSET_BRICK (DWI_dset, 0);
   nx = data_im->nx; ny = data_im->ny; nz = data_im->nz; nxyz = data_im->nxyz;
   nxy = nx * ny;
   nbriks = DWI_dset->dblk->nvals;
   /* precompute offsets for each stencil point relative to the center point */
   yp1xp1 = nxp1 = nx + 1;
   yp1xm1 = nxm1 = nx - 1;
   nym1 = ny - 1;
   nzm1 = nz - 1;
   maskflag = 0;

   /* make a new image array to hold the gradients and then the gradient products */
   INIT_IMARR(Gradient_Im);
   for(i=0;i<nout; i++) {   /* create sub-briks for du/dx^2, du/dx*du/dy and du/dy^2 */
      im = mri_new_vol(nx, ny, nz, MRI_float);
      if(im==NULL) {
         ERROR_message("can not create temporary data storage");
         RETURN(NULL);
      }
      ADDTO_IMARR(Gradient_Im, im);
   }


   if(smoothflag) {
      blur_data = malloc(nxyz*sizeof(float));
      if(blur_data==NULL) {
         ERROR_message("Error - could not allocate memory for gradient");
         exit(1);
      }
   }

   if(flag2D3D == 2)   /* for 2D, don't smooth in the Z direction */
      dz = 0.0f;
   else
      dz = 1.0f;

   if(flag2D3D==2) {
      for(i=0;i<nbriks; i++) {   /* for each sub-brik in dataset */
         data_im = DSET_BRICK (DWI_dset, i);   /* pointer to the ith sub-brik of the dataset */
         ar = (float *) mri_data_pointer(data_im) ;
         if(smoothflag) {
            memcpy(blur_data, ar, nxyz*sizeof(float));
            EDIT_blur_volume( nx,ny,nz, 1.0f,1.0f,dz, MRI_float, blur_data, smooth_factor ) ;
            ar = blur_data;
         }
         /* reset the output gradient pointers after each sub-brik */
         for(ii=0;ii<nout;ii++) {
            im = (Gradient_Im->imarr[ii]);
            gptr[ii] = (float *) mri_data_pointer(im);
         }
         baseoffset = 0;
         vptr = vptr0 = vptr1 = ar;
         tempmaskptr = maskptr;

         for(vz=0;vz<nz;vz++) {
            for(vy=0;vy<ny;vy++) {
               for(vx=0;vx<nx;vx++) {
                  if(maskptr) {
                     maskflag = *tempmaskptr++;
                     if (!maskflag) {   /* check if point is in mask or not */
                        baseoffset++;
                        vptr0++;
                        vptr1++;
                        vptr++;
                        for(ii=0;ii<nout;ii++)
                           gptr[ii]++;
                        continue;
                     }
                     /* edge of mask is treated specially if value in mask is 2 and not 1 */
                  }

                  if((maskflag==2) || (vx<1) || (vy<=1) || (vx==nxm1) || (vy==nym1)) {   /* special cases at edges */
                     /* get voxels for 3x3 stencil */
                     vptr = ar + baseoffset;
                     v4 = *vptr++;   /* get central value at voxel and move pointer to the right */
                     /* set first row of 3 voxels and vptr0 */
                     if(vy==0) {
                        v0 = v1 = v2 = v4;   /* all take the original voxel value; don't need vptr0 */
                     }
                     else {
                        v0 = vox_val(vx-1, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v1 = vox_val(vx, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v2 = vox_val(vx+1, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        vptr0 = vptr - nx;   /* init pointer for first row */
                     }

                     /* middle row of voxels */
                     v3 = vox_val(vx-1, vy, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);
                     v5 = vox_val(vx+1, vy, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);

                     if(vy==nym1) {
                        v6 = v7 = v8 = v4;   /* all take the original voxel value; don't need vptr1 */
                     }
                     else {
                        v6 = vox_val(vx-1, vy+1, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);
                        v7 = vox_val(vx, vy+1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v8 = vox_val(vx+1, vy+1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        vptr1 = vptr + nx;   /* init pointer for third row */
                     }

                     dv0 = v6 - v0;
                     dv1 = v7 - v1;
                     dv2 = v8 - v2;
                  }
                  else {   /* x>=1, y>=2: slide the stencil one voxel to the right */
                     v0 = v1;
                     v1 = v2;
                     v2 = *(++vptr0);

                     v3 = v4;
                     v4 = v5;
                     v5 = *(++vptr);

                     /* row after voxel */
                     v6 = v7;
                     v7 = v8;
                     v8 = *(++vptr1);
                     dv0 = dv1;
                     dv1 = dv2;
                     dv2 = v8 - v2;
                  }

                  dudy = a*(dv0 + dv2) + b* dv1;
                  dudx = a*(v2+v8-v0-v6) + b*(v5-v3);

                  if(prodflag) {
                     *(gptr[0]) += dudx * dudx;   /* sum gradient product components in output image array */
                     *(gptr[1]) += dudx * dudy;
                     *(gptr[2]) += dudy * dudy;
                     gptr[2]++;
                  }
                  else {
                     *(gptr[0]) += dudx;   /* sum gradient components in output image array */
                     *(gptr[1]) += dudy;
                  }
                  gptr[0]++;
                  gptr[1]++;
                  baseoffset++;
               } /* x loop */
            } /* y loop */
         } /* z loop */
      } /* sub-brick loop */
   }
   else {
      /* 3D case */
      /* get 9 row pointers this time and fill 27 values */
      /* each slice gets 3 row pointers, but we need the z-1, z, z+1 slices;
         v0-v8 are voxel values in slice z-1, v9-v17 in slice z, v18-v26 in slice z+1 */
      /*
         v0 v1 v2    v9  v10 v11   v18 v19 v20
         v3 v4 v5    v12 v13 v14   v21 v22 v23
         v6 v7 v8    v15 v16 v17   v24 v25 v26
            z-1           z            z+1
      */

      for(i=0;i<nbriks; i++) {   /* for each sub-brik in dataset */
         data_im = DSET_BRICK (DWI_dset, i);   /* pointer to the ith sub-brik of the dataset */
         ar = (float *) mri_data_pointer(data_im) ;
         if(smoothflag) {
            memcpy(blur_data, ar, nxyz*sizeof(float));
            EDIT_blur_volume( nx,ny,nz, 1.0f,1.0f,dz, MRI_float, blur_data, smooth_factor ) ;
            ar = blur_data;
         }
         /* reset the output gradient pointers after each sub-brik */
         for(ii=0;ii<nout;ii++) {
            im = (Gradient_Im->imarr[ii]);
            gptr[ii] = (float *) mri_data_pointer(im);
         }
         baseoffset = 0;
         vptr = vptr0 = vptr1 = vptr2 = vptr3 = vptr5 = vptr6 = vptr7 = vptr8 = ar;
         tempmaskptr = maskptr;


         for(vz=0;vz<nz;vz++) {
            for(vy=0;vy<ny;vy++) {
               for(vx=0;vx<nx;vx++) {
                  if(maskptr) {
                     maskflag = *tempmaskptr++;
                     if (!maskflag) {   /* check if point is in mask or not */
                        baseoffset++;
                        vptr0++;
                        vptr1++;
                        vptr2++;
                        vptr3++;
                        vptr++;
                        vptr5++;
                        vptr6++;
                        vptr7++;
                        vptr8++;
                        for(ii=0;ii<nout;ii++)
                           gptr[ii]++;
                        continue;
                     }
                     /* edge of mask is treated specially if value in mask is 2 and not 1 */
                  }


                  if((maskflag==2) || (vx<1) || (vy<=1) || (vx==nxm1) || (vy==nym1) || (vz<=1)
                     || (vz==nzm1)) {   /* special cases at edges */
                     /* get voxels for 3x3 stencil in central slice as before */
                     vptr = ar + baseoffset;
                     v13 = *vptr++;   /* get central value at voxel and move pointer to the right */
                     /* set first row of 3 voxels and vptr3 */
                     if(vy==0) {
                        v0 = v1 = v2 = v9 = v10 = v11 = v18 = v19 = v20 = v13;   /* all take the original voxel value */
                        vptr0 = vptr3 = vptr6 = vptr;
                     }
                     else {
                        v9 = vox_val(vx-1, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v10 = vox_val(vx, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v11 = vox_val(vx+1, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        vptr3 = vptr - nx;   /* init pointer for first row */
                     }

                     /* middle row of voxels */
                     v12 = vox_val(vx-1, vy, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);
                     v14 = vox_val(vx+1, vy, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);

                     if(vy==nym1) {
                        v6 = v7 = v8 = v15 = v16 = v17 = v24 = v25 = v26 = v13;   /* all take the original voxel value */
                        vptr5 = vptr2 = vptr8 = vptr;
                     }
                     else {
                        v15 = vox_val(vx-1, vy+1, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);
                        v16 = vox_val(vx, vy+1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v17 = vox_val(vx+1, vy+1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        vptr5 = vptr + nx;   /* init pointer for third row */
                     }


                     /* now get values from the z-1 slice */
                     if(vz==0) {
                        v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = v8 = v13;
                        vptr0 = vptr3;
                        vptr1 = vptr;
                        vptr2 = vptr5;
                     }
                     else {
                        vptr1 = vptr - nxy;
                        /* set first row of 3 voxels and vptr0 */
                        if(vy!=0) {
                           v0 = vox_val(vx-1, vy-1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           v1 = vox_val(vx, vy-1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           v2 = vox_val(vx+1, vy-1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           vptr0 = vptr1 - nx;   /* init pointer for first row */
                        }

                        /* middle row of voxels */
                        v3 = vox_val(vx-1, vy, vz-1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                        v4 = vox_val(vx, vy, vz-1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                        v5 = vox_val(vx+1, vy, vz-1, ar, nx, ny, nz, maskptr, vx, vy, vz);

                        if(vy!=nym1) {
                           v6 = vox_val(vx-1, vy+1, vz-1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                           v7 = vox_val(vx, vy+1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           v8 = vox_val(vx+1, vy+1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           vptr2 = vptr1 + nx;   /* init pointer for third row */
                        }
                     }
                     /* now get values from the z+1 slice */
                     if(vz==nzm1) {   /* last slice in volume */
                        v18 = v19 = v20 = v21 = v22 = v23 = v24 = v25 = v26 = v13;
                        vptr6 = vptr3;
                        vptr7 = vptr;
                        vptr8 = vptr5;
                     }
                     else {
                        vptr7 = vptr + nxy;
                        /* set first row of 3 voxels and vptr6 */
                        if(vy!=0) {
                           v18 = vox_val(vx-1, vy-1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           v19 = vox_val(vx, vy-1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           v20 = vox_val(vx+1, vy-1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           vptr6 = vptr7 - nx;   /* init pointer for first row */
                        }

                        /* middle row of voxels */
                        v21 = vox_val(vx-1, vy, vz+1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                        v22 = vox_val(vx, vy, vz+1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                        v23 = vox_val(vx+1, vy, vz+1, ar, nx, ny, nz, maskptr, vx, vy, vz);

                        if(vy!=nym1) {
                           v24 = vox_val(vx-1, vy+1, vz+1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                           v25 = vox_val(vx, vy+1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           v26 = vox_val(vx+1, vy+1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                           vptr8 = vptr7 + nx;   /* init pointer for third row */
                        }
                     }

                     /* initialize sliding differences for dudy */
                     dv0600 = v6 - v0;
                     dv0701 = v7 - v1;
                     dv1509 = v15 - v9;
                     dv1610 = v16 - v10;
                     dv2418 = v24 - v18;
                     dv2519 = v25 - v19;

                     /* initialize sliding sums for dudz */
                     sv1824 = v18 + v24;
                     sv1925 = v19 + v25;
                     sv0006 = v0 + v6;
                     sv0107 = v1 + v7;
                     dv2103 = v21 - v3;
                     dv2204 = v22 - v4;
                     dv2305 = v23 - v5;   /* mod 08-27-2008 drg */
                  }

                  else {   /* x>=1, y>=2: slide the stencil one voxel to the right */
                     /* z-1 slice */
                     v0 = v1;
                     v1 = v2;
                     v2 = *(++vptr0);

                     v3 = v4;
                     v4 = v5;
                     v5 = *(++vptr1);

                     v6 = v7;
                     v7 = v8;
                     v8 = *(++vptr2);
                     /* z slice */
                     v9 = v10;
                     v10 = v11;
                     v11 = *(++vptr3);

                     v12 = v13;
                     v13 = v14;
                     v14 = *(++vptr);

                     v15 = v16;
                     v16 = v17;
                     v17 = *(++vptr5);
                     /* z+1 slice */
                     v18 = v19;
                     v19 = v20;
                     v20 = *(++vptr6);

                     v21 = v22;
                     v22 = v23;
                     v23 = *(++vptr7);

                     v24 = v25;
                     v25 = v26;
                     v26 = *(++vptr8);

                     /* slide differences for dudy */
                     dv0600 = dv0701;
                     dv0701 = dv0802;
                     dv1509 = dv1610;
                     dv1610 = dv1711;
                     dv2418 = dv2519;
                     dv2519 = dv2620;

                     /* slide sums for dudz */
                     sv1824 = sv1925;
                     sv1925 = sv2026;
                     sv0006 = sv0107;
                     sv0107 = sv0208;
                     dv2103 = dv2204;
                     dv2204 = dv2305;
                  }

                  /* compute new sliding sums and differences */
                  dv0802 = v8 - v2;
                  dv1711 = v17 - v11;
                  dv2620 = v26 - v20;

                  sv2026 = v20 + v26;
                  sv0208 = v2 + v8;
                  dv2204 = v22 - v4;
                  dv2305 = v23 - v5;   /* mod -drg oops for 3D case, missed this one */

                  /*
                     v0 v1 v2    v9  v10 v11   v18 v19 v20
                     v3 v4 v5    v12 v13 v14   v21 v22 v23
                     v6 v7 v8    v15 v16 v17   v24 v25 v26
                        z-1           z            z+1
                  */
                  /* du/dx across alternating planes left and right of current voxel: */
                  /* a-terms: corners of planes; b-terms: centers of edges of planes; */
                  /* c-term: centers of sides - adjacent in x-1, x+1 in same slice    */

                  /* dudx = a * (v2 + v20 + v8 + v26 - v0 - v18 - v6 - v24) +
                            b * (v11 + v5 + v23 + v17 - v9 - v3 - v21 - v15) +
                            c * (v14 - v12); */
                  dudx = a * (sv0208 + sv2026 - sv0006 - sv1824) +
                         b * (v11 + v5 + v23 + v17 - v9 - v3 - v21 - v15) +
                         c * (v14 - v12);

                  /* dudy = a * (v6 + v8 + v24 + v26 - v0 - v2 - v18 - v20) +
                            b * (v7 + v15 + v17 + v25 - v1 - v9 - v11 - v19) +
                            c * (v16 - v10); */
                  dudy = a * (dv0600 + dv0802 + dv2418 + dv2620) +
                         b * (dv0701 + dv1509 + dv1711 + dv2519) +
                         c * dv1610;

                  /* dudz = a * (v18 + v20 + v24 + v26 - v0 - v2 - v6 - v8) +
                            b * (v19 + v21 + v23 + v25 - v1 - v3 - v5 - v7) +
                            c * (v22 - v4); */
                  dudz = a * (sv1824 + sv2026 - sv0006 - sv0208) +
                         b * (sv1925 + dv2103 + dv2305 - sv0107) +
                         c * dv2204;

                  if(prodflag) {
                     *(gptr[0]) += dudx * dudx;   /* sum gradient product components in output image array */
                     *(gptr[1]) += dudx * dudy;
                     *(gptr[2]) += dudx * dudz;
                     *(gptr[3]) += dudy * dudy;
                     *(gptr[4]) += dudy * dudz;
                     *(gptr[5]) += dudz * dudz;
                  }
                  else {
                     *(gptr[0]) += dudx;   /* sum gradient components in output image array */
                     *(gptr[1]) += dudy;
                     *(gptr[2]) += dudz;
                  }
                  baseoffset++;
                  for(ii=0;ii<nout;ii++)
                     gptr[ii]++;   /* and increment pointers */
               } /* x */
            } /* y */
         } /* z */
      } /* brick loop */
   } /* end 3D case */


   /* final normalization (mean) and check for being very close to zero */
   /* reset the output gradient pointers after the last sub-brik */
   for(ii=0;ii<nout;ii++) {
      im = (Gradient_Im->imarr[ii]);
      gptr[ii] = (float *) mri_data_pointer(im);
   }
   for(vi=0;vi<nxyz;vi++) {
      for(ii=0;ii<nout;ii++) {
         *gptr[ii] = *gptr[ii] / nbriks;   /* normalize gradient for number of briks */
         temp = fabs(*gptr[ii]);
         if(temp<TINYNUMBER)
            *gptr[ii] = 0.0;
         gptr[ii]++;   /* and increment pointers */
      }
   }
   if(smoothflag)
      free(blur_data);

   RETURN(Gradient_Im);
}
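
/* The loops above trade clarity for speed: at interior voxels they reuse
   six of the nine (2D) or eighteen of the twenty-seven (3D) stencil values
   from the previous x position instead of refetching them with vox_val().
   A minimal 1D sketch of the same sliding-window idea (hypothetical code,
   assuming nx >= 2; not used in this file):

      // running 1-2-1 smoothing with a 3-value window, edges clamped
      float w0 = u[0], w1 = u[0], w2 = u[1];
      for (x = 0; x < nx; x++) {
         out[x] = 0.25f*w0 + 0.5f*w1 + 0.25f*w2;
         w0 = w1; w1 = w2;                       // slide the window
         w2 = (x+2 < nx) ? u[x+2] : u[nx-1];     // clamp at the right edge
      }
*/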


/* compute numerical gradients for each voxel and compose matrix for smoothing,
   including [du/dx du/dy] for a single volume MRI_IMAGE */
MRI_IMARR *
Compute_Gradient_Matrix_Im(MRI_IMAGE *SourceIm, int flag2D3D, byte *maskptr,
                           int xflag, int yflag, int zflag)
{
   /* SourceIm is the input volume */
   /* flag2D3D is a flag for the dimensionality of the gradient */
   /* maskptr is a pointer to a mask array to mask the data - NULL if no mask */
   /* xflag - compute and return dU/dx */
   /* yflag - compute and return dU/dy */
   /* zflag - compute and return dU/dz */
   /* the gradient matrix is returned as an MRI_IMARR (1 or 2 sub-briks for the 2D case) */

   /* edge points and masked points are treated equivalently, */
   /* with a test for the index of each node in the kernels and
      the central voxels themselves */

   /* du/dx is calculated with a 3x3 kernel for 2D as */
   /*   -a 0 a       v0 v1 v2 */
   /*   -b 0 b       v3 v4 v5 */
   /*   -a 0 a       v6 v7 v8 */
   /* where a=3/16, b=10/16 */

   /* du/dy is calculated with a 3x3 kernel for 2D as */
   /*    c  d  c     v0 v1 v2 */
   /*    0  0  0     v3 v4 v5 */
   /*   -c -d -c     v6 v7 v8 */
   /* where c=3/16, d=10/16 */

   MRI_IMARR *Gradient_Im;
   MRI_IMAGE *im;

   byte *tempmaskptr;
   float *ar, *gptr[3];
   double a, b, c, d;
   double dudx, dudy, dudz;

   float *vptr, *vptr0, *vptr1, *vptr2, *vptr3, *vptr5, *vptr6, *vptr7, *vptr8;
   float v0=0.0, v1=0.0, v2=0.0, v3=0.0, v4=0.0, v5=0.0, v6=0.0, v7=0.0, v8=0.0;
   float v9, v10=0.0, v11=0.0, v12, v13=0.0, v14=0.0, v15, v16=0.0, v17=0.0, v18=0.0;
   float v19=0.0, v20=0.0, v21, v22=0.0, v23=0.0, v24=0.0, v25=0.0, v26=0.0;
   float dv0600=0.0, dv0701=0.0, dv0802=0.0, dv1509=0.0, dv1610=0.0, dv1711=0.0, dv2418=0.0, dv2519=0.0, dv2620=0.0;
   float sv1824=0.0, sv1925=0.0, sv2026=0.0, sv0006=0.0, sv0107=0.0, sv0208=0.0, dv2103=0.0, dv2204=0.0, dv2305=0.0;

   float dv0, dv1=0.0, dv2=0.0, temp;
   int nx, ny, nz, i, ii, nout, noutm1, nxm1, nym1, nzm1;
   char maskflag;
   int yp1xp1, yp1xm1, nxy, nxyz, baseoffset;
   int vx, vy, vz;
   int ybrik=0;

   /* float dx = 1.0; */   /* delta x - assume cubical voxels for now */

   ENTRY("Compute_Gradient_Matrix_Im");

   /* set up constants for kernel */
   a = 0.1875;  /* / (2.0 * dx); */  /* 3/16 */
   b = 0.625;   /* / (2.0 * dx); */  /* 10/16 */
   c = 0.1875;
   d = 0.625;

   maskflag = 0;
   nout = 0;
   if(xflag)
      nout++;
   if(yflag)
      ybrik = nout++;
   if(zflag)
      nout++;
   if(nout==0) {
      ERROR_message("Nothing to compute in Compute_Gradient_Matrix_Im");
      RETURN(NULL);
   }

   noutm1 = nout - 1;
   /** load the grid parameters **/
   nx = SourceIm->nx ;
   ny = SourceIm->ny ;
   nz = SourceIm->nz ;
   nxyz = SourceIm->nxyz;
   nxy = nx * ny;
   yp1xp1 = nx + 1;
   yp1xm1 = nxm1 = nx - 1;
   nym1 = ny - 1;
   nzm1 = nz - 1;

   /* make a new image array to hold the gradients */
   INIT_IMARR(Gradient_Im);
   for(i=0;i<nout; i++) {   /* create sub-briks for output gradient */
      im = mri_new_vol(nx, ny, nz, MRI_float);
      if(im==NULL) {
         ERROR_message("can not create temporary data storage");
         RETURN(NULL);
      }
      ADDTO_IMARR(Gradient_Im, im);
   }


   for(ii=0;ii<nout;ii++) {
      im = (Gradient_Im->imarr[ii]);
      gptr[ii] = (float *) mri_data_pointer(im);
   }

   ar = mri_data_pointer(SourceIm);
   baseoffset = 0;
   vptr = vptr0 = vptr1 = vptr2 = vptr3 = vptr5 = vptr6 = vptr7 = vptr8 = ar;
   tempmaskptr = maskptr;

#if 1
   if(flag2D3D==2) {   /* 2D option */
      for(vz=0;vz<nz;vz++) {
         for(vy=0;vy<ny;vy++) {
            for(vx=0;vx<nx;vx++) {
               if(maskptr) {   /* check if point is in mask or not */
                  maskflag = *tempmaskptr++;
                  if(!maskflag) {
                     baseoffset++;
                     vptr++;
                     vptr0++;
                     vptr1++;
                     for(ii=0;ii<nout;ii++)
                        gptr[ii]++;
                     continue;
                  }
               }

               if((maskflag==2) || (vx<1) || (vx==nxm1) || (vy<=1) || (vy==nym1)) {   /* special cases at edges */
                  /* get voxels for 3x3 stencil */
                  vptr = (float *) ar + baseoffset;
                  v4 = *vptr++;   /* get central value at voxel and move pointer to the right */
                  /* set first row of 3 voxels and vptr0 */
                  if(vy==0) {
                     v0 = v1 = v2 = v4;   /* all take the original voxel value; don't need vptr0 */
                  }
                  else {
                     v0 = vox_val(vx-1, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     v1 = vox_val(vx, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     v2 = vox_val(vx+1, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     vptr0 = vptr - nx;   /* init pointer for first row */
                  }
                  if(xflag) {
                     /* middle row of voxels */
                     v3 = vox_val(vx-1, vy, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);
                     v5 = vox_val(vx+1, vy, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);
                  }
                  if(vy==nym1) {
                     v6 = v7 = v8 = v4;   /* all take the original voxel value; don't need vptr1 */
                  }
                  else {
                     v6 = vox_val(vx-1, vy+1, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);
                     v7 = vox_val(vx, vy+1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     v8 = vox_val(vx+1, vy+1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     vptr1 = vptr + nx;   /* init pointer for third row */
                  }

                  if(yflag) {
                     dv0 = v6 - v0;
                     dv1 = v7 - v1;
                     dv2 = v8 - v2;
                  }
               }
               else {
                  /* row before voxel */
                  v0 = v1;
                  v1 = v2;
                  v2 = *(++vptr0);
                  if(xflag) {
                     /* same row as voxel */
                     v3 = v4;
                     v4 = v5;
                     v5 = *(++vptr);
                  }
                  /* row after voxel */
                  v6 = v7;
                  v7 = v8;
                  v8 = *(++vptr1);
                  if(yflag) {   /* slide differences for du/dy */
                     dv0 = dv1;
                     dv1 = dv2;
                     dv2 = v8 - v2;
                  }
               }


               if(xflag) {
                  dudx = a*(v2+v8-v0-v6) + b*(v5-v3);
                  temp = fabs(dudx);
                  if(temp>=TINYNUMBER)
                     *(gptr[0]) = dudx;   /* store gradient component in output image array */
                  /*else
                     *gptr[0] = 0.0; */   /* already zeroed out when allocated */
               }

               if(yflag) {
                  dudy = a*(dv0 + dv2) + b* dv1;
                  temp = fabs(dudy);
                  if(temp>=TINYNUMBER)
                     *(gptr[noutm1]) = dudy;
                  /*else
                     *(gptr[noutm1]) = 0.0;*/   /* already zeroed out when allocated */
               }

               for(ii=0;ii<nout;ii++) {
                  gptr[ii]++;
               }
               baseoffset++;
            }
         }
      }

   } /* end if 2D */


   else {
      /* 3D case */
      /* get 9 row pointers this time and fill 27 values */
      /* each slice gets 3 row pointers, but we need the z-1, z, z+1 slices;
         v0-v8 are voxel values in slice z-1, v9-v17 in slice z, v18-v26 in slice z+1 */
      /*
         v0 v1 v2    v9  v10 v11   v18 v19 v20
         v3 v4 v5    v12 v13 v14   v21 v22 v23
         v6 v7 v8    v15 v16 v17   v24 v25 v26
            z-1           z            z+1
      */
      for(vz=0;vz<nz;vz++) {
         for(vy=0;vy<ny;vy++) {
            for(vx=0;vx<nx;vx++) {
               if(maskptr) {
                  maskflag = *tempmaskptr++;
                  if (!maskflag) {   /* check if point is in mask or not */
                     baseoffset++;
                     vptr0++;
                     vptr1++;
                     vptr++;
                     vptr2++;
                     vptr3++;
                     vptr5++;
                     vptr6++;
                     vptr7++;
                     vptr8++;
                     for(ii=0;ii<nout;ii++)
                        gptr[ii]++;
                     continue;
                  }
                  /* edge of mask is treated specially if value in mask is 2 and not 1 */
               }


               if((maskflag==2) || (vx<1) || (vy<=1) || (vx==nxm1) || (vy==nym1) || (vz<=1)
                  || (vz==nzm1)) {   /* special cases at edges */
                  /* get voxels for 3x3 stencil in central slice as before */
                  vptr = ar + baseoffset;
                  v13 = *vptr++;   /* get central value at voxel and move pointer to the right */
                  /* set first row of 3 voxels and vptr3 */
                  if(vy==0) {
                     v0 = v1 = v2 = v9 = v10 = v11 = v18 = v19 = v20 = v13;   /* all take the original voxel value */
                     vptr0 = vptr3 = vptr6 = vptr;
                  }
                  else {
                     v9 = vox_val(vx-1, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     v10 = vox_val(vx, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     v11 = vox_val(vx+1, vy-1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     vptr3 = vptr - nx;   /* init pointer for first row */
                  }

                  /* middle row of voxels */
                  v12 = vox_val(vx-1, vy, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);
                  v14 = vox_val(vx+1, vy, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);

                  if(vy==nym1) {
                     v6 = v7 = v8 = v15 = v16 = v17 = v24 = v25 = v26 = v13;   /* all take the original voxel value */
                     vptr5 = vptr2 = vptr8 = vptr;
                  }
                  else {
                     v15 = vox_val(vx-1, vy+1, vz, ar, nx, ny, nz, maskptr, vx, vy, vz);
                     v16 = vox_val(vx, vy+1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     v17 = vox_val(vx+1, vy+1, vz, ar, nx, ny, nz, maskptr, vx,vy,vz);
                     vptr5 = vptr + nx;   /* init pointer for third row */
                  }


                  /* now get values from the z-1 slice */
                  if(vz==0) {
                     v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = v8 = v13;
                     vptr0 = vptr3;
                     vptr1 = vptr;
                     vptr2 = vptr5;
                  }
                  else {
                     vptr1 = vptr - nxy;
                     /* set first row of 3 voxels and vptr0 */
                     if(vy!=0) {
                        v0 = vox_val(vx-1, vy-1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v1 = vox_val(vx, vy-1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v2 = vox_val(vx+1, vy-1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        vptr0 = vptr1 - nx;   /* init pointer for first row */
                     }

                     /* middle row of voxels */
                     v3 = vox_val(vx-1, vy, vz-1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                     v4 = vox_val(vx, vy, vz-1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                     v5 = vox_val(vx+1, vy, vz-1, ar, nx, ny, nz, maskptr, vx, vy, vz);

                     if(vy!=nym1) {
                        v6 = vox_val(vx-1, vy+1, vz-1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                        v7 = vox_val(vx, vy+1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v8 = vox_val(vx+1, vy+1, vz-1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        vptr2 = vptr1 + nx;   /* init pointer for third row */
                     }
                  }
                  /* now get values from the z+1 slice */
                  if(vz==nzm1) {   /* last slice in volume */
                     v18 = v19 = v20 = v21 = v22 = v23 = v24 = v25 = v26 = v13;
                     vptr6 = vptr3;
                     vptr7 = vptr;
                     vptr8 = vptr5;
                  }
                  else {
                     vptr7 = vptr + nxy;
                     /* set first row of 3 voxels and vptr6 */
                     if(vy!=0) {
                        v18 = vox_val(vx-1, vy-1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v19 = vox_val(vx, vy-1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v20 = vox_val(vx+1, vy-1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        vptr6 = vptr7 - nx;   /* init pointer for first row */
                     }

                     /* middle row of voxels */
                     v21 = vox_val(vx-1, vy, vz+1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                     v22 = vox_val(vx, vy, vz+1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                     v23 = vox_val(vx+1, vy, vz+1, ar, nx, ny, nz, maskptr, vx, vy, vz);

                     if(vy!=nym1) {
                        v24 = vox_val(vx-1, vy+1, vz+1, ar, nx, ny, nz, maskptr, vx, vy, vz);
                        v25 = vox_val(vx, vy+1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        v26 = vox_val(vx+1, vy+1, vz+1, ar, nx, ny, nz, maskptr, vx,vy,vz);
                        vptr8 = vptr7 + nx;   /* init pointer for third row */
                     }
                  }


                  /* initialize sliding differences for dudy */
                  if(yflag) {
                     dv0600 = v6 - v0;
                     dv0701 = v7 - v1;
                     dv1509 = v15 - v9;
                     dv1610 = v16 - v10;
                     dv2418 = v24 - v18;
                     dv2519 = v25 - v19;
                  }

                  /* initialize sliding sums for dudx, dudz */
                  if((xflag)||(zflag)) {
                     sv2026 = v20 + v26;   /* for dudx, dudz */
                     sv0208 = v2 + v8;     /* for dudx, dudz */
                     sv1824 = v18 + v24;   /* for dudx, dudz */
                     sv0006 = v0 + v6;     /* for dudx, dudz */
                     if(zflag) {
                        sv1925 = v19 + v25;
                        sv0107 = v1 + v7;
                        dv2103 = v21 - v3;
                        dv2204 = v22 - v4;
                        dv2305 = v23 - v5;   /* mod - 08-27-2008 drg */
                     }
                  }
               }

               else {   /* x>=1, y>=2: slide the stencil one voxel to the right */
                  /* z-1 slice */
                  v0 = v1;
                  v1 = v2;
                  v2 = *(++vptr0);

                  v3 = v4;
                  v4 = v5;
                  v5 = *(++vptr1);

                  v6 = v7;
                  v7 = v8;
                  v8 = *(++vptr2);
                  /* z slice */
                  v9 = v10;
                  v10 = v11;
                  v11 = *(++vptr3);

                  v12 = v13;
                  v13 = v14;
                  v14 = *(++vptr);

                  v15 = v16;
                  v16 = v17;
                  v17 = *(++vptr5);
                  /* z+1 slice */
                  v18 = v19;
                  v19 = v20;
                  v20 = *(++vptr6);

                  v21 = v22;
                  v22 = v23;
                  v23 = *(++vptr7);

                  v24 = v25;
                  v25 = v26;
                  v26 = *(++vptr8);

                  /* slide differences for dudy */
                  if(yflag) {
                     dv0600 = dv0701;
                     dv0701 = dv0802;
                     dv1509 = dv1610;
                     dv1610 = dv1711;
                     dv2418 = dv2519;
                     dv2519 = dv2620;
                  }
                  /* slide sums for dudx, dudz */
                  if((zflag) || (xflag)) {
                     sv1824 = sv1925;      /* for dudx, dudz */
                     sv1925 = sv2026;      /* for dudx, dudz */

                     sv0006 = sv0107;      /* for dudx, dudz */
                     sv0107 = sv0208;      /* for dudx, dudz */
                     sv2026 = v20 + v26;   /* for dudx, dudz */
                     sv0208 = v2 + v8;     /* for dudx, dudz */

                     if(zflag) {
                        dv2103 = dv2204;
                        dv2204 = v22 - v4;   /* for dudz */
                        dv2305 = v23 - v5;   /* mod - 08-27-2008 drg */
                     }

                  }

               }


               /*
                  v0 v1 v2    v9  v10 v11   v18 v19 v20
                  v3 v4 v5    v12 v13 v14   v21 v22 v23
                  v6 v7 v8    v15 v16 v17   v24 v25 v26
                     z-1           z            z+1
               */
               /* du/dx across alternating planes left and right of current voxel: */
               /* a-terms: corners of planes; b-terms: centers of edges of planes; */
               /* c-term: centers of sides - adjacent in x-1, x+1 in same slice    */

               /* dudx = a * (v2 + v20 + v8 + v26 - v0 - v18 - v6 - v24) +
                         b * (v11 + v5 + v23 + v17 - v9 - v3 - v21 - v15) +
                         c * (v14 - v12); */
               if(xflag) {
                  dudx = a * (sv0208 + sv2026 - sv0006 - sv1824) +
                         b * (v11 + v5 + v23 + v17 - v9 - v3 - v21 - v15) +
                         c * (v14 - v12);
                  temp = fabs(dudx);
                  if(temp>=TINYNUMBER)
                     *(gptr[0]) = dudx;   /* store gradient component in output image array */
                  /*else
                     *gptr[0] = 0.0; */   /* already zeroed out when allocated */
               }

               /* dudy = a * (v6 + v8 + v24 + v26 - v0 - v2 - v18 - v20) +
                         b * (v7 + v15 + v17 + v25 - v1 - v9 - v11 - v19) +
                         c * (v16 - v10); */
               if(yflag) {
                  dv0802 = v8 - v2;     /* for dudy */
                  dv1711 = v17 - v11;   /* for dudy */
                  dv2620 = v26 - v20;   /* for dudy */

                  dudy = a * (dv0600 + dv0802 + dv2418 + dv2620) +
                         b * (dv0701 + dv1509 + dv1711 + dv2519) +
                         c * dv1610;
                  temp = fabs(dudy);
                  if(temp>=TINYNUMBER)
                     *(gptr[ybrik]) = dudy;   /* store gradient component in output image array */
                  /*else *(gptr[ybrik]) = 0.0; */   /* already zeroed out when allocated */
               }

               /* dudz = a * (v18 + v20 + v24 + v26 - v0 - v2 - v6 - v8) +
                         b * (v19 + v21 + v23 + v25 - v1 - v3 - v5 - v7) +
                         c * (v22 - v4); */
               if(zflag) {
                  dudz = a * (sv1824 + sv2026 - sv0006 - sv0208) +
                         b * (sv1925 + dv2103 + dv2305 - sv0107) +
                         c * dv2204;
                  temp = fabs(dudz);
                  if(temp>=TINYNUMBER)
                     *(gptr[noutm1]) = dudz;   /* store gradient component in output image array */
                  /*else *(gptr[noutm1]) = 0.0; */   /* already zeroed out when allocated */
               }

               baseoffset++;
               for(ii=0;ii<nout;ii++)
                  gptr[ii]++;
            } /* x */
         } /* y */
      } /* z */
   } /* end 3D case */

#endif


   RETURN(Gradient_Im);
}
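
/* Illustrative call (hypothetical variable names): the x- and y-gradients
   of a single float image, unmasked, with the 2D kernels:

      MRI_IMARR *grad = Compute_Gradient_Matrix_Im(im, 2, NULL, 1, 1, 0);
      float *dudx = (float *) mri_data_pointer(grad->imarr[0]);
      float *dudy = (float *) mri_data_pointer(grad->imarr[1]);

   With all three flags set in 3D mode, the result has three sub-briks
   ordered dU/dx, dU/dy, dU/dz. */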

/*! get voxel value at x,y,z from the image, but limit by dimensions and mask */
static INLINE float vox_val(int x, int y, int z, float *imptr, int nx, int ny, int nz,
                            byte *maskptr, int i, int j, int k)
{
   float voxval;
   int offset;
   /* get voxel values within limits of 0 to nx-1 and 0 to ny-1 */
   /* if the value is not inside the mask, use the value at i, j, k instead */


#define max(a,b) ((a) > (b) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))

   x = min(x, (nx-1));
   x = max(x, 0);

   y = min(y, (ny-1));
   y = max(y, 0);

   z = min(z, (nz-1));
   z = max(z, 0);

   offset = nx*(z*ny+y) + x;
   /* put mask check here too */
   if((maskptr!=NULL) && !(*(maskptr+offset)))   /* if not in mask, use the i,j,k offset */
      offset = nx*(k*ny+j) + i;
   voxval = *(imptr+offset);

   /*#define VOX_VAL(x,y,offset,nx,ny) \
       (*((offset) + min(max((y),0),(ny-1))*(nx) + min(max((x),0),(nx-1))))*/

   return(voxval);
}
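
/* Example of the clamping behavior (illustrative numbers): in a 10x10x10
   volume with no mask,

      vox_val(-1, 4, 3, ar, 10, 10, 10, NULL, 0, 4, 3)

   clamps x to 0 and returns ar[10*(3*10+4) + 0], the nearest in-volume
   neighbor. With a mask, a neighbor that falls outside the mask is
   replaced by the value at the central voxel (i,j,k) instead. */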
1634
1635
1636 /* Compute eigenvectors and eigenvalues for Gradient matrix */
Eig_Gradient(MRI_IMARR * Gradient_Im,int flag2D3D,byte * maskptr)1637 MRI_IMARR *Eig_Gradient(MRI_IMARR *Gradient_Im, int flag2D3D, byte *maskptr)
1638 {
1639 MRI_IMARR *Eig_Im;
1640 MRI_IMAGE *im;
1641 byte *tempmaskptr;
1642
1643 float *gptr[12];
1644 int ii, jj, starti, endi;
1645 register float a1, a2, a3, aa2;
1646 double a13, rad, L1, L2, x11, x12, x21, x22;
1647 int nx, ny, nz, nxyz;
1648 float maxim, tempmax0, tempmax2;
1649 double almostzero;
1650 double aa[9], e[3];
1651
1652 ENTRY("Eig_Gradient");
1653 tempmaskptr = maskptr;
1654 im = Gradient_Im->imarr[0];
1655 nx = im->nx; ny = im->ny; nz = im->nz; nxyz = im->nxyz;
1656
1657 /* will use Gradient_Im structure and data space in place already for
1658 eigenvalues and eigenvectors ) */
1659 if(flag2D3D==2) {
1660 starti = 3;
1661 endi = 6;
1662 }
1663 else {
1664 starti = 6;
1665 endi = 12;
1666 }
1667
1668 /* for 2D, need 6 sub-briks in output mri_imarr-2 eigenvalues,4 vector components */
1669 for(ii=starti;ii<endi; ii++) { /* add 3 sub-briks to the current mri_imarr for each original sub-brik*/
1670 im = mri_new_vol(nx, ny, nz, MRI_float);
1671 if(im==NULL) {
1672 ERROR_message("can not create temporary data storage");
1673 RETURN(NULL);
1674 }
1675 ADDTO_IMARR(Gradient_Im, im);
1676 }
1677
1678 for(ii=0;ii<endi;ii++) {
1679 im = (Gradient_Im->imarr[ii]);
1680 gptr[ii] = (float *) mri_data_pointer(im);
1681 }
1682
1683 /* find max Sxx, Syy in gradients */
1684 im = (Gradient_Im->imarr[0]);
1685 tempmax0 = Find_Max_Im(im, maskptr); /* max Sxx */
1686 if(flag2D3D==2)
1687 im = (Gradient_Im->imarr[2]);
1688 else
1689 im = (Gradient_Im->imarr[3]);
1690 tempmax2 = Find_Max_Im(im, maskptr); /* max Syy */
1691
1692 if(tempmax0>tempmax2)
1693 maxim = tempmax0;
1694 else
1695 maxim = tempmax2;
1696
1697 if(flag2D3D==3) {
1698 im = (Gradient_Im->imarr[5]);
1699 tempmax2 = Find_Max_Im(im, maskptr); /* max Szz */
1700 if(tempmax2>maxim)
1701 maxim = tempmax2;
1702 }
1703
1704 almostzero = maxim / 100000.0;
1705
1706 for(ii=0;ii<nxyz;ii++) {
1707 if(maskptr && (!(*tempmaskptr++))) {
1708
1709 for(jj=0;jj<endi;jj++) {
1710 *gptr[jj] = 0.0;
1711 }
1712 }
1713 else
1714 {
1715
1716 /* for 2D case, solve by "hand" */
1717
         if(flag2D3D==2) {

            a1 = *gptr[0];
            a2 = *gptr[1];
            aa2 = fabs(a2);          /* fabs, not abs: a2 is a float */
            a3 = *gptr[2];
            a13 = a1 + a3;
            rad = sqrt(4.0*a2*a2 + ((a1-a3)*(a1-a3)));
            L1 = (a13 + rad) / 2.0;
            L2 = (a13 - rad) / 2.0;
            if(aa2<=almostzero) {
               if(a1<=a3) {
                  x11 = 0.0;
                  x12 = 1.0;
                  x21 = 1.0;
                  x22 = 0.0;
               }
               else {
                  x11 = 1.0;
                  x12 = 0.0;
                  x21 = 0.0;
                  x22 = 1.0;
               }
            }
            else {
               rad = (L1-a1)/a2;
               x11 = sqrt(1/(1+rad*rad));
               x12 = x11 * rad;
               rad = (L2-a1)/a2;
               x21 = sqrt(1/(1+rad*rad));
               x22 = x21 * rad;
            }
            /* overwriting gradient values in 3 sub-briks here */
            a1 = fabs(L1);           /* order by eigenvalue magnitude */
            a2 = fabs(L2);
            if(a1>=a2) {
               *gptr[0] = L1;
               *gptr[1] = L2;
               *gptr[2] = x11;
               *gptr[3] = x12;
               *gptr[4] = x21;
               *gptr[5] = x22;
            }
            else {
               *gptr[0] = L2;
               *gptr[1] = L1;
               *gptr[2] = x21;
               *gptr[3] = x22;
               *gptr[4] = x11;
               *gptr[5] = x12;
            }

         }
         else {   /* 3D flag */
            aa[0] = *gptr[0];
            aa[1] = *gptr[1];
            aa[2] = *gptr[2];

            aa[3] = *gptr[1];
            aa[4] = *gptr[3];
            aa[5] = *gptr[4];

            aa[6] = *gptr[2];
            aa[7] = *gptr[4];
            aa[8] = *gptr[5];


            symeig_double(3, aa, e);   /* on return, e has the eigenvalues
                                          (ascending) and aa the eigenvectors */

            /* add check for small eigenvalues here */


            /* reverse the order in gptr to give maximum value first */
            *gptr[0] = e[2];     /* eigenvalues */
            *gptr[1] = e[1];
            *gptr[2] = e[0];

            *gptr[3] = aa[6];    /* eigenvectors */
            *gptr[4] = aa[7];
            *gptr[5] = aa[8];

            *gptr[6] = aa[3];
            *gptr[7] = aa[4];
            *gptr[8] = aa[5];

            *gptr[9] = aa[0];
            *gptr[10] = aa[1];
            *gptr[11] = aa[2];
         }


      } /* not masked */
      for(jj=0;jj<endi;jj++) {
         if(isnan(*gptr[jj]))
            *gptr[jj] = 0.0;
         gptr[jj]++;   /* increment pointers for next voxel */
      }

   }

   Eig_Im = Gradient_Im;
   RETURN(Eig_Im);
}
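
/* Resulting sub-brik layout of the returned IMARR (for reference,
   derived from the code above):
     2D: [0..1] eigenvalues, largest magnitude first;
         [2..3] matching eigenvector (x,y); [4..5] second eigenvector
     3D: [0..2]  eigenvalues, descending;
         [3..5]  principal eigenvector; [6..8] second; [9..11] third */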

/* find the maximum value of an image, restricted to the mask if one is
   given; assumes the data are nonnegative (returns 0.0 otherwise) */
float Find_Max_Im(MRI_IMAGE *im, byte *maskptr)
{
   int i, nxyz;
   float *gptr;
   float t1, max_im;
   byte *tempmaskptr;

   ENTRY("Find_Max_Im");

   tempmaskptr = maskptr;
   max_im = 0.0;
   nxyz = im->nxyz;
   gptr = (float *) mri_data_pointer(im);
   for(i=0;i<nxyz;i++) {
      if(maskptr && (!*tempmaskptr++))
         gptr++;
      else {
         t1 = *gptr;
         gptr++;
         if(t1>max_im)
            max_im = t1;
      }

   }
   RETURN(max_im);
}

/* compute inverted eigenvalue matrix */
MRI_IMARR *Compute_Phi(MRI_IMARR *EV_Im, int flag2D3D, byte *maskptr)
{
   MRI_IMARR *phi_Im;
   MRI_IMAGE *im;
   double c1 = 0.01, c2 = -0.01;
   /* c2 = -1.00,*/
   double mc1 = 0.99, evensplit, secondsplit;
   double e1, e2, e3, e12, a, b, emax;
   /* e1me2;*/
   float *gptr[3];
   int nxyz, ii, jj, endi;
   byte *tempmaskptr;

   ENTRY("Compute_Phi");

   if(flag2D3D==2) {
      evensplit = 0.5;
      secondsplit = 1.0;
   }
   else {
      evensplit = 1.0/3.0;
      secondsplit = 0.5;
   }
   endi = flag2D3D;

   im = EV_Im->imarr[0];
   nxyz = im->nxyz;
   tempmaskptr = maskptr;

   /* replace the first two (2D) or three (3D) eigenvalue sub-briks
      with phi values */
   for(ii=0;ii<endi;ii++) {
      im = (EV_Im->imarr[ii]);
      gptr[ii] = (float *) mri_data_pointer(im);
   }


   /* Ding method: phi_i = (1/e_i) / sum_j (1/e_j) */
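   /* Worked example (2D): eigenvalues e = (4, 1) give 1/e = (0.25, 1.0),
      which sums to 1.25, so phi = (0.2, 0.8); smoothing is weighted
      toward the direction of least image gradient (smallest eigenvalue). */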
   if(compute_method==0) {
      emax = Find_Max_Im(EV_Im->imarr[0], maskptr);
      a = emax / 100.0;   /* a is read only by the disabled code below;
                             b is currently unused */
      b = 1.0 / a;

      for(ii=0;ii<nxyz;ii++) {
         if(maskptr && !*tempmaskptr++) {
            for(jj=0;jj<endi;jj++) {
               *gptr[jj] = 0.0;
            }
         }
         else {
            e1 = *gptr[0];
            e2 = *gptr[1];

            if(e1<=0.0) {   /* e1 zero (or negative) */
               for(jj=0;jj<endi;jj++) {
                  *gptr[jj] = evensplit;   /* phi values all equal: 0.5,0.5 for 2D or 1/3,1/3,1/3 for 3D */
               }
            }
            else {
               if(e2<=0.0) {   /* e2 zero (or negative) */
                  *gptr[0] = 0.0;
                  for(jj=1;jj<endi;jj++)
                     *gptr[jj] = secondsplit;   /* remaining phi values: 0,1 for 2D or 0,0.5,0.5 for 3D */
               }
               else {
                  e1 = 1.0/e1;
                  e2 = 1.0/e2;
                  e12 = e1 + e2;
                  if(flag2D3D==3) {
                     e3 = *gptr[2];
                     if(e3<=0) {   /* zero e3: phi for 3D is 0 0 1 */
                        e3 = 1.0;
                        e1 = 0.0;
                        e2 = 0.0;
                        e12 = 1.0;
                     }
                     else {   /* normal case: phi_i = (1/e_i) / (1/e1 + 1/e2 + 1/e3) */
                        e3 = 1.0 / e3;
                        e12 += e3;
                     }
                     *gptr[2] = e3 / e12;
                  }
                  *gptr[0] = e1 / e12;
                  *gptr[1] = e2 / e12;
               }
            }
         } /* included in mask */
         for(jj=0;jj<endi;jj++)
            gptr[jj]++;
      } /* end for each voxel in volume */
   } /* end Ding method */
   else {   /* use the exponential method instead */
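      /* Weighting in the style of Weickert's coherence-enhancing
         diffusion: after normalizing the eigenvalues to sum to 1,
            phi_1 = c1,
            phi_k = c1 + (1-c1)*exp( c2 / (e1-e_k)^2 )   for k > 1,
         so phi_k stays near the small baseline c1 where the eigenvalues
         are nearly equal (no preferred direction) and rises toward 1
         along directions of strong local coherence (e1 >> e_k). */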
      for(ii=0;ii<nxyz;ii++) {
         if(maskptr && !*tempmaskptr++) {
            for(jj=0;jj<endi;jj++)
               *gptr[jj] = 0.0;
         }
         else {
            e1 = *gptr[0];
            e2 = *gptr[1];
            if(flag2D3D==3) {   /* 3D case */
               e3 = *gptr[2];
               e12 = e1 + e2 + e3;
               if(e12<TINYNUMBER) {   /* if very small numbers or zero */
                  e1 = e2 = e3 = evensplit;   /* set all equal to 1/3 */
               }
               else {   /* scale eigenvalues to sum to 1 */
                  e1 = e1 / e12;
                  e2 = e2 / e12;
                  e3 = e3 / e12;
               }
               *gptr[0] = c1;
               if(e1==e2) {
                  *gptr[1] = c1;
               }
               else {
                  e12 = e1 - e2;
                  e12 *= e12;
                  *gptr[1] = c1 + (mc1 * exp(c2/e12));
               }

               if(e1==e3) {
                  *gptr[2] = c1;
               }
               else {
                  e12 = e1 - e3;
                  e12 *= e12;
                  *gptr[2] = c1 + (mc1 * exp(c2/e12));
               }
            }
            else {   /* 2D case */
               e12 = e1 + e2;
               if(e12<TINYNUMBER)
                  e1 = e2 = evensplit;
               else {
                  e1 = e1 / e12;
                  e2 = e2 / e12;
               }
               if(e1==e2)
                  *gptr[1] = c1;
               else {
                  e12 = (e1-e2);
                  e12 *= e12;
                  *gptr[1] = c1 + (mc1 * exp(c2 / e12));
               }
               *gptr[0] = c1;
            } /* end in 2D */
         } /* end in mask */
         gptr[0]++; gptr[1]++;
         if(flag2D3D==3) gptr[2]++;
      }
   }



#if 0
   /* fractional anisotropy method: phi1,2 = 1/(e1-e2), 0.01 */
   emax = 0.0;
   for(ii=0;ii<nxyz;ii++) {
      e1 = *gptr[0];
      e2 = *gptr[1];
      e1me2 = e1-e2;
      if(e1me2>=a)
         e1 = 1/e1me2;
      else
         e1 = -9999.0;
      *gptr[0] = e1;
      *gptr[1] = 0.01;
      gptr[0]++; gptr[1]++;
      if(e1>emax)
         emax = e1;
   }

   im = (EV_Im->imarr[0]);
   gptr[0] = (float *) mri_data_pointer(im);

   for(ii=0;ii<nxyz;ii++) {
      e1 = *gptr[0];
      if(e1==-9999.0) {
         *gptr[0] = emax;
      }
      gptr[0]++;
   }
#endif

#if 0
   if(e2!=0.0)
      e2 = 1/e2;
   else
      e2 = 1.0;
   if(e1==e2)
      e1 = e2 = 0.5;
   else {
      a = 1/(e1+e2);
      e1 *= a;
      e2 *= a;
   }
#endif


#if 0
   if(e1==e2)
      *gptr[0] = c1;
   else {
      e12 = (e1-e2);
      e12 *= e12;
      *gptr[0] = c1 + (mc1 * exp(c2 / e12) );
   }
   *gptr[1] = c1;
   gptr[0]++; gptr[1]++;

#endif
   phi_Im = EV_Im;
   RETURN(phi_Im);
}
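
/* Note: the global compute_method selects the weighting above:
   0 gives the Ding inverse-eigenvalue method, anything else the
   exponential (coherence-enhancing) method. */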

/* compute FA, MD, Cl, Cp, Cs, and more */
THD_3dim_dataset *Compute_DiffMeasures(MRI_IMARR *EV_Im, int flag2D3D,
                                       byte *maskptr,
                                       THD_3dim_dataset *grid_dset, float *cen)
{
   THD_3dim_dataset *dout=NULL;
   MRI_IMARR *TP_Im=NULL;
   MRI_IMAGE *im=NULL;
   int nxyz, ii, jj, endi, nout = 6, vi, vj, vk;
   float *inptr[12], *outptr[6], e0, e1, e2, tr, d,
         v0[3], nV, nR, R[3], xyz[3];

   byte *tempmaskptr;

   ENTRY("Compute_DiffMeasures");

   if(flag2D3D==2) {
      ERROR_message("Not ready for 2D form, complain heartily to D. Glen!");
      RETURN(NULL);
   }

   if (!EV_Im || !grid_dset) {
      ERROR_message("Lousy input!");
      RETURN(NULL);
   }

   /* centroid is required for the Rdot measure */
   if (!cen) {
      ERROR_message("I need me a centroid");
      RETURN(NULL);
   }
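
   /* Standard tensor shape measures computed below (Westin-style),
      with eigenvalues e0 >= e1 >= e2 and trace tr = e0+e1+e2:
        FA = sqrt( ((e0-e1)^2 + (e1-e2)^2 + (e0-e2)^2) / (2*(e0^2+e1^2+e2^2)) )
        MD = tr/3
        Cl = (e0-e1)/tr         (linear anisotropy)
        Cp = 2*(e1-e2)/tr       (planar anisotropy)
        Cs = 3*e2/tr            (spherical/isotropic measure)
      plus "Rdot": the dot product of the unit principal eigenvector
      with the unit vector from the centroid cen to the voxel. */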

   endi = flag2D3D;

   im = EV_Im->imarr[0];
   nxyz = im->nxyz;
   tempmaskptr = maskptr;

   /* prepare for output (each new volume takes its grid size from the
      previous image, starting from EV_Im->imarr[0]) */
   INIT_IMARR(TP_Im);
   for(ii=0;ii<nout; ii++) {
      im = mri_new_vol(im->nx, im->ny, im->nz, MRI_float);
      if(im==NULL) {
         ERROR_message("cannot create data storage");
         RETURN(NULL);
      }
      ADDTO_IMARR(TP_Im, im);
   }

   /* get the eigenvalues and the 1st eigenvector direction
      (this is not set up to work for 2D smoothing!) */
   for(ii=0;ii<endi+3;ii++) {
      im = (EV_Im->imarr[ii]);
      inptr[ii] = (float *) mri_data_pointer(im);
   }

   /* init output pointers */
   for(ii=0;ii<nout;ii++) {
      im = (TP_Im->imarr[ii]);
      outptr[ii] = (float *) mri_data_pointer(im);
   }

   /* Compute nout params */
   for(ii=0;ii<nxyz;ii++) {
      if(maskptr && !*tempmaskptr++) {
         for(jj=0;jj<nout;jj++) {
            *outptr[jj] = 0.0;
         }
      } else {
         e0 = *inptr[0];
         e1 = *inptr[1];
         e2 = *inptr[2];
         v0[0] = *inptr[3];
         v0[1] = *inptr[4];
         v0[2] = *inptr[5];

         tr = (e0+e1+e2);
         /* FA */
         if ((d = (e0*e0+e1*e1+e2*e2))>0.0f) {
            *outptr[0] = sqrt(0.5*( (e0-e1)*(e0-e1)+
                                    (e1-e2)*(e1-e2)+
                                    (e0-e2)*(e0-e2) )/ d );
            if (*outptr[0] > 1.0) *outptr[0] = 1.0;
         } else *outptr[0] = 0.0;

         /* MD */
         *outptr[1] = (tr)/3.0;
         if (*outptr[1] < 0.0) *outptr[1] = 0.0;
         /* Cl */
         if (tr > 0.0) {
            *outptr[2] = (e0-e1)/tr;
            if (*outptr[2] > 1.0) *outptr[2] = 1.0;
         } else *outptr[2] = 0.0;

         /* Cp */
         if (tr > 0.0) {
            *outptr[3] = 2.0*(e1-e2)/tr;
            if (*outptr[3] > 1.0) *outptr[3] = 1.0;
         } else *outptr[3] = 0.0;
         /* Cs */
         if (tr > 0.0) {
            *outptr[4] = 3.0*e2/tr;
            if (*outptr[4] < 0.0) *outptr[4] = 0.0;
         } else *outptr[4] = 0.0;
         /* Dot product of v0 with direction from center */
         if (tr > 0.0) {
            AFNI_1D_to_3D_index(ii, vi, vj, vk,
                                DSET_NX(grid_dset),
                                DSET_NX(grid_dset)*DSET_NY(grid_dset));
            AFNI_ijk_to_xyz( grid_dset ,
                             vi, vj, vk,
                             xyz, xyz+1 , xyz+2 );
            /* nV should be 1 already ... */
            nV = sqrt(v0[0]*v0[0]+v0[1]*v0[1]+v0[2]*v0[2]);
            R[0] = xyz[0]-cen[0];
            R[1] = xyz[1]-cen[1];
            R[2] = xyz[2]-cen[2];
            nR = sqrt(R[0]*R[0]+R[1]*R[1]+R[2]*R[2]);
#if 0
            if (ii==3949365) {
               fprintf(stderr,"Debugging for voxel [%d %d %d] %d\n"
                              " R = [%f %f %f] %f, cen = [%f %f %f]\n"
                              "Principal V = [%f %f %f] %f, Principal e = %f\n"
                              "Eigs = [%f %f %f]\n"
                              "dot = %f\n",
                       vi, vj, vk, ii,
                       R[0], R[1], R[2], nR, cen[0], cen[1], cen[2],
                       v0[0], v0[1], v0[2], nV, e0,
                       e0, e1, e2,
                       (R[0]*v0[0]+R[1]*v0[1]+R[2]*v0[2])/(nV*nR));

            }
#endif
            if ((nV = nV*nR) != 0.0) {
               *outptr[5] = (R[0]*v0[0]+R[1]*v0[1]+R[2]*v0[2])/nV;
            } else *outptr[5] = 0.0;
         } else *outptr[5] = 0.0;
      } /* included in mask */

      /* increment to next voxel */
      for(jj=0;jj<endi+3;jj++) inptr[jj]++;
      for(jj=0;jj<nout;jj++) outptr[jj]++;
   } /* end for each voxel in volume */

   /* make nice dset */
   dout = Copy_IMARR_to_dset(grid_dset, TP_Im, "Diff_Measures");
   tross_Copy_History (grid_dset, dout);
   EDIT_dset_items(dout,ADN_brick_label_one + 0,"FA",ADN_none);
   EDIT_dset_items(dout,ADN_brick_label_one + 1,"MD",ADN_none);
   EDIT_dset_items(dout,ADN_brick_label_one + 2,"Cl",ADN_none);
   EDIT_dset_items(dout,ADN_brick_label_one + 3,"Cp",ADN_none);
   EDIT_dset_items(dout,ADN_brick_label_one + 4,"Cs",ADN_none);
   EDIT_dset_items(dout,ADN_brick_label_one + 5,"Rdot",ADN_none);
   EDIT_dset_items(dout ,
                   ADN_malloc_type , DATABLOCK_MEM_MALLOC , /* store in memory */
                   ADN_prefix , "Diff_Measures",
                   ADN_none ) ;

   RETURN(dout);
}


/* Compute the D diffusion tensor for the dataset */
MRI_IMARR *ComputeDTensor(MRI_IMARR *phi_Im, int flag2D3D, byte *maskptr)
{
   MRI_IMARR *DTensor;
   MRI_IMAGE *im;
   double v[4], a1, a2;
   int nxyz, ii, i, endi;
   float *gptr[12], *tempptr;
   byte *tempmaskptr;
   double e10, e11, e12, e20, e21, e22, e30, e31, e32;
   double l1, l2, l3;
   double t1, t3, t5, t8, t10, t12, t14, t18, t19, t21, t23, t32, t33, t35, t37;



   ENTRY("ComputeDTensor");
   /* D = V Phi VT */
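   /* With V the matrix whose columns are the eigenvectors and
      Phi = diag(phi_i), D = V Phi V^T expands (2D case) to
         Dxx = phi1*v1x*v1x + phi2*v2x*v2x
         Dxy = phi1*v1x*v1y + phi2*v2x*v2y
         Dyy = phi1*v1y*v1y + phi2*v2y*v2y
      Only the unique elements of the symmetric D are stored:
      3 sub-briks in 2D, 6 in 3D. */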
   im = phi_Im->imarr[0];
   nxyz = im->nxyz;
   tempmaskptr = maskptr;

   if(flag2D3D==2)
      endi = 6;
   else
      endi = 12;
   /* replace the leading phi/eigenvector sub-briks with the D tensor
      elements: Dxx,Dxy,Dyy in 2D; Dxx,Dxy,Dxz,Dyy,Dyz,Dzz in 3D */
   for(ii=0;ii<endi;ii++) {
      im = (phi_Im->imarr[ii]);
      gptr[ii] = (float *) mri_data_pointer(im);
   }


   if(flag2D3D==2) {
      for(ii=0;ii<nxyz;ii++) {
         if(maskptr && !(*tempmaskptr++)) {
            for(i=0;i<endi;i++) {
               tempptr = gptr[i];
               *tempptr = 0.0;
               gptr[i]++;
            }
         }
         else {
            /* for 2D case, solve by "hand" */
            a1 = *gptr[0];
            a2 = *gptr[1];
            /* copy the four eigenvector components before overwriting */
            v[0] = *gptr[2];
            for(i=1;i<4;i++){
               v[i] = *gptr[i+2];
            }
            *gptr[0] = (v[0] * v[0] * a1) + (v[2] * v[2] * a2);   /* Dxx */
            *gptr[1] = (v[0] * v[1] * a1) + (v[2] * v[3] * a2);   /* Dxy */
            *gptr[2] = (v[1] * v[1] * a1) + (v[3] * v[3] * a2);   /* Dyy */

            for(i=0;i<endi;i++) {
               gptr[i]++;
            }
         }
      }
   }
   else {   /* 3D option */

      for(ii=0;ii<nxyz;ii++) {

         if(maskptr && !(*tempmaskptr++)) {
            for(i=0;i<endi;i++) {
               tempptr = gptr[i];
               *tempptr = 0.0;
            }
         }
         else {   /* use Maple-generated code to calculate V Phi Vt */
            l1 = *gptr[0];
            l2 = *gptr[1];
            l3 = *gptr[2];

            e10 = *gptr[3];
            e11 = *gptr[4];
            e12 = *gptr[5];

            e20 = *gptr[6];
            e21 = *gptr[7];
            e22 = *gptr[8];

            e30 = *gptr[9];
            e31 = *gptr[10];
            e32 = *gptr[11];

            t1 = e10 * e10;
            t3 = e20 * e20;
            t5 = e30 * e30;
            t8 = e10 * l1;
            t10 = e20 * l2;
            t12 = e30 * l3;
            t14 = t8 * e11 + t10 * e21 + t12 * e31;
            t18 = t8 * e12 + t10 * e22 + t12 * e32;
            t19 = e11 * e11;
            t21 = e21 * e21;
            t23 = e31 * e31;
            t32 = e11 * l1 * e12 + e21 * l2 * e22 + e31 * l3 * e32;
            t33 = e12 * e12;
            t35 = e22 * e22;
            t37 = e32 * e32;
            *gptr[0] = t1 * l1 + t3 * l2 + t5 * l3;    /* Dxx */
            *gptr[1] = t14;                            /* Dxy */
            *gptr[2] = t18;                            /* Dxz */
            *gptr[3] = t19 * l1 + t21 * l2 + t23 * l3; /* Dyy */
            *gptr[4] = t32;                            /* Dyz */
            *gptr[5] = t33 * l1 + t35 * l2 + t37 * l3; /* Dzz */
         }
         for(i=0;i<endi;i++) {
            gptr[i]++;
         }
      }
   }

   DTensor = phi_Im;

   RETURN(DTensor);
}


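/* Illustrative, self-contained sketch (disabled; not part of the
   pipeline): the per-voxel 2D chain performed above by Eig_Gradient,
   Compute_Phi (Ding method), and ComputeDTensor, written out with plain
   doubles for one structure tensor [a1 a2; a2 a3]. It assumes |a2| is
   not essentially zero and that both eigenvalues are positive. */
#if 0
static void demo_2D_voxel_chain(double a1, double a2, double a3,
                                double D[3])
{
   double rad, L1, L2, x11, x12, x21, x22, phi1, phi2, s;

   /* eigenvalues of the symmetric 2x2 tensor */
   rad = sqrt(4.0*a2*a2 + (a1-a3)*(a1-a3));
   L1 = (a1 + a3 + rad) / 2.0;
   L2 = (a1 + a3 - rad) / 2.0;

   /* unit eigenvectors from (a1-L)*x + a2*y = 0 */
   rad = (L1-a1)/a2;  x11 = 1.0/sqrt(1.0+rad*rad);  x12 = x11 * rad;
   rad = (L2-a1)/a2;  x21 = 1.0/sqrt(1.0+rad*rad);  x22 = x21 * rad;

   /* Ding weighting: phi_i proportional to 1/L_i */
   s = 1.0/L1 + 1.0/L2;
   phi1 = (1.0/L1) / s;
   phi2 = (1.0/L2) / s;

   /* D = V Phi V^T : unique elements Dxx, Dxy, Dyy */
   D[0] = phi1*x11*x11 + phi2*x21*x21;
   D[1] = phi1*x11*x12 + phi2*x21*x22;
   D[2] = phi1*x12*x12 + phi2*x22*x22;
}
#endif
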
/* copy IMARR (image array) to new_dset using base_dset as the model */
THD_3dim_dataset *
Copy_IMARR_to_dset(THD_3dim_dataset * base_dset, MRI_IMARR *Imptr, char *new_prefix)
{
   THD_3dim_dataset * new_dset;
   int i;

   ENTRY("Copy_IMARR_to_dset");

   /*-- make the empty copy --*/
   new_dset = EDIT_empty_copy( base_dset ) ;

   THD_init_datablock_labels( new_dset->dblk ) ;
   EDIT_dset_items( new_dset ,
                    ADN_malloc_type , DATABLOCK_MEM_MALLOC , /* store in memory */
                    ADN_prefix , new_prefix ,
                    ADN_label1 , new_prefix ,
                    ADN_nvals , Imptr->num ,     /* # sub-bricks */
                    ADN_ntt , 0 ,                /* # time points */
                    ADN_datum_all , MRI_float ,  /* atomic datum */
                    ADN_none ) ;

   THD_init_datablock_keywords( new_dset->dblk ) ;
   THD_init_datablock_stataux( new_dset->dblk ) ;


   /* new_dset->dblk->brick = Imptr;*/ /* update pointer to data */
   new_dset->dblk->nvals = Imptr->num;

   for(i=0;i<Imptr->num; i++) {   /* for each sub-brik in dataset */
      /* Imptr->imarr[i]->kind = MRI_float;*/
      EDIT_substitute_brick(new_dset, i, MRI_float, mri_data_pointer(Imptr->imarr[i]));
   }


   RETURN(new_dset);
}

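/* Note (from the code above): the new dataset's sub-bricks point at the
   same float arrays as Imptr's images; the data are shared, not copied,
   so the IMARR's data should not be freed independently while the
   dataset is in use. */
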
/* compute and print the maximum of each sub-brik in MRI_IMARR data */
/* for debugging */
void Compute_IMARR_Max(MRI_IMARR *Imptr)
{
   int i, j, nxyz;
   float tmax, tt;
   float *gptr;
   MRI_IMAGE *im;

   for(i=0;i<Imptr->num;i++) {   /* for each sub-brik */
      tmax = TINYNUMBER;
      im = Imptr->imarr[i];
      gptr = (float *) mri_data_pointer(im);
      nxyz = im->nxyz;
      for(j=0;j<nxyz;j++){   /* for each voxel of data */
         tt = *gptr;
         gptr++;
         if(tt>tmax)
            tmax = tt;
      }
      printf("max brik %d = %f\n", i, tmax);
   }
}