1 /**
2  * afni/src/3dECM.c
3  */
4 
5 /* Look for OpenMP macro */
6 #ifdef USE_OMP
7 #include <omp.h>
8 #endif
9 
10 /* Include libraries */
11 #include "mrilib.h"
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include "sparse_array.h"
15 
16 /* Define constants */
17 #define SPEARMAN 1
18 #define QUADRANT 2
19 #define PEARSON  3
20 #define ETA2     4
21 
22 #define MAX_NUM_TAGS 32
23 #define MAX_TAG_LEN 256
24 
25 #define SQRT_2 ((double) 1.4142135623 )
26 
27 /* CC - variables for tracking memory usage stats */
28 static int MEM_PROF = 0;
29 static int MEM_STAT = 0;
30 static char mem_tags[MAX_NUM_TAGS][MAX_TAG_LEN];
31 static long mem_allocated[MAX_NUM_TAGS];
32 static long mem_freed[MAX_NUM_TAGS];
33 static long mem_num_tags = 0;
34 static long running_mem = 0;
35 static long peak_mem = 0;
36 static long total_mem = 0;
37 
38 /* CC macro for updating mem stats */
39 #define INC_MEM_STATS( INC, TAG ) \
40     { \
41         if( MEM_PROF == 1 ) \
42         { \
43             int ndx = 0; \
44             while( ndx < mem_num_tags ) \
45             { \
46                 if( strncmp( mem_tags[ndx], TAG, MAX_TAG_LEN ) == 0 ) \
47                 { \
48                     break; \
49                 } \
50                 ndx++; \
51             } \
52             if(( ndx >= mem_num_tags ) && (ndx < MAX_NUM_TAGS)) \
53             { \
54                 /* adding a new tag */ \
55                 strncpy( mem_tags[ ndx ], TAG, (MAX_TAG_LEN-1) ); \
56                 mem_allocated[ ndx ] = 0; \
57                 mem_freed[ ndx ] = 0; \
58                 mem_num_tags++; \
59             } \
60             if( ndx < MAX_NUM_TAGS ) \
61             { \
62                 mem_allocated[ ndx ] += (long)(INC); \
63                 if ((long)(INC) > 1024 ) WARNING_message("Incrementing memory for %s by %ldB\n", TAG, (long)(INC)); \
64             } \
65             else WARNING_message("No room in mem profiler for %s\n", TAG ); \
66         } \
67         total_mem += (long)(INC); \
68         running_mem += (long)(INC); \
69         if (running_mem > peak_mem) peak_mem = running_mem; \
70     }
71 
72 #define DEC_MEM_STATS( DEC, TAG ) \
73     { \
74         if( MEM_PROF == 1 ) \
75         { \
76             int ndx = 0; \
77             while( ndx < mem_num_tags ) \
78             { \
79                 if( strncmp( mem_tags[ndx], TAG, MAX_TAG_LEN ) == 0 ) \
80                 { \
81                     break; \
82                 } \
83                 else ndx++ ; \
84             } \
85             if(( ndx >= mem_num_tags ) && (ndx < MAX_NUM_TAGS)) \
86             { \
87                 WARNING_message("Could not find tag %s in mem profiler\n", TAG ); \
88             } \
89             else \
90             { \
91                 mem_freed[ ndx ] += (long)(DEC); \
92                 if ((long)(DEC) > 1024 ) INFO_message("Free %ldB of memory for %s\n", (long)(DEC), TAG); \
93             } \
94         } \
95         running_mem -= (long)(DEC); \
96     }
97 
98 #define PRINT_MEM_STATS( TAG ) \
99         if ( MEM_STAT == 1 ) \
100         { \
101             INFO_message("\n======\n== Mem Stats (%s): Running %3.3fMB, Total %3.3fMB, Peak %3.3fMB\n", \
102             TAG, \
103             (double)(running_mem/(1024.0*1024.0)), \
104             (double)(total_mem/(1024.0*1024.0)), \
105             (double)(peak_mem/(1024.0*1024.0))); \
106             if( MEM_PROF ==  1 ) \
107             { \
108                 int ndx = 0; \
109                 INFO_message("== Memory Profile\n"); \
110                 for( ndx=0; ndx < mem_num_tags; ndx++ ) \
111                 { \
112                     INFO_message("%s: %ld allocated %ld freed\n", mem_tags[ndx], \
113                         mem_allocated[ndx], mem_freed[ndx] ); \
114                 } \
115             } \
116         }
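/* Illustrative usage of the memory-tracking macros above (a sketch only;
   'buf' and 'nbuf' are placeholder names, not variables defined in this file):

       buf = (double*)calloc(nbuf, sizeof(double));
       INC_MEM_STATS( nbuf*sizeof(double), "buf" );
       PRINT_MEM_STATS( "buf" );
       ...
       free(buf); buf = NULL;
       DEC_MEM_STATS( nbuf*sizeof(double), "buf" );

   The same INC/PRINT/DEC pattern is used throughout the functions below. */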
117 
118 
119 /* freeing all of the allocated mem on an error can get a little messy. instead
120    we can use this macro to check what has been allocated and kill it. this of
121    course requires strict discipline for initializing all pointers to NULL and
122    resetting them to NULL when freed. i should be able to handle that */
123 #define CHECK_AND_FREE_ALL_ALLOCATED_MEM \
124 { \
125     /* eliminate DSETS */ \
126         if ( mset != NULL ) \
127         { \
128             DSET_unload(mset) ; \
129             DSET_delete(mset) ; \
130             mset = NULL ; \
131         } \
132 \
133         if ( xset != NULL ) \
134         { \
135             DSET_unload(xset) ; \
136             DSET_delete(xset) ; \
137             xset = NULL ; \
138         } \
139 \
140         if ( cset != NULL ) \
141         { \
142             DSET_unload(cset) ; \
143             DSET_delete(cset) ; \
144             cset = NULL ; \
145         } \
146 \
147      /* free the xvectim */ \
148         if ( xvectim != NULL ) \
149         { \
150             VECTIM_destroy(xvectim) ; \
151             xvectim = NULL ; \
152         } \
153 \
154     /* free allocated mems */ \
155         if( mask != NULL ) \
156         { \
157             free(mask) ; \
158             mask = NULL ; \
159         } \
160 \
161         if( eigen_vec != NULL ) \
162         { \
163             free(eigen_vec) ; \
164             eigen_vec = NULL ; \
165         } \
166 }
167 
168 /* being good and cleaning up before erroring out can be a pain, and messy, lets
169    make a variadic macro that does it for us. */
170 #define ERROR_EXIT_CC( ... ) \
171     { \
172         CHECK_AND_FREE_ALL_ALLOCATED_MEM; \
173         ERROR_exit( __VA_ARGS__ ); \
174     }
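/* Example (as used in the option parsing in main() below): on a bad argument
   we can simply call
       ERROR_EXIT_CC("Illegal value after -max_iter!") ;
   and any datasets, the vectim, the mask, and the eigenvector buffer that
   have already been allocated are released before the program exits. */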
175 
176 /*----------------------------------------------------------------------------*/
177 static void vstep_print(void)
178 {
179    static int nn=0 ;
180    static char xx[10] = "0123456789" ;
181    fprintf(stderr , "%c" , xx[nn%10] ) ;
182    if( nn%10 == 9) fprintf(stderr,",") ;
183    nn++ ;
184 }
185 
186 /*----------------------------------------------------------------*/
187 /**** Include these here for potential optimization for OpenMP ****/
188 /*----------------------------------------------------------------*/
189 /*! Pearson correlation of x[] and y[] (x and y are NOT modified.
190     And we know ahead of time that the time series have 0 mean
191     and L2 norm 1.
192 *//*--------------------------------------------------------------*/
193 
194 float zm_THD_pearson_corr( int n, float *x , float *y ) /* inputs are */
195 {                                                       /* zero mean  */
196    register float xy ; register int ii ;                /* and norm=1 */
197    if( n%2 == 0 ){
198      xy = 0.0f ;
199      for( ii=0 ; ii < n ; ii+=2 ) xy += x[ii]*y[ii] + x[ii+1]*y[ii+1] ;
200    } else {
201      xy = x[0]*y[0] ;
202      for( ii=1 ; ii < n ; ii+=2 ) xy += x[ii]*y[ii] + x[ii+1]*y[ii+1] ;
203    }
204    return xy ;
205 }
206 
207 double cc_pearson_corr( long n, float *x, float *y )
208 {
209     /* index and corr value in processor register for faster access */
210     register int ii;
211     register double xy = (double)0.0;
212     for(ii=0; ii<n; ii++)
213     {
214         xy+=x[ii]*y[ii];
215     }
216     return( xy );
217 }
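/* Note: both routines above return sum_i x[i]*y[i].  Because the time series
   are assumed to have already been demeaned and scaled to unit L2 norm, that
   inner product equals the Pearson correlation coefficient
       r(x,y) = sum_i (x_i - mx)(y_i - my) / ( ||x - mx|| * ||y - my|| ). */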
218 
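/* calc_fecm_power: fast ECM (Wink et al. 2012).  Runs the power iteration on
   the implicit similarity matrix scale*(X X' + shift*1 1') using only
   matrix-vector products with the nvec x nvals data matrix X (the vectim),
   so the full voxel-by-voxel correlation matrix is never formed.  Returns a
   newly allocated nvec-long eigenvector estimate, or NULL on failure. */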
219 double* calc_fecm_power(MRI_vectim *xvectim, double shift, double scale, double eps, long max_iter)
220 {
221     /* CC - we need a few arrays for calculating the power method */
222     double* v_prev = NULL;
223     double* v_new = NULL;
224     double* v_temp = NULL;
225     double* xv_int = NULL;
226     double  v_err = 0.0;
227     long    power_it;
228     float*  xsar=NULL;
229     double  v_new_sum_sq = 0.0;
230     double  v_new_norm = 0.0;
231     double  v_prev_sum_sq = 0.0;
232     double  v_prev_norm = 0.0;
233     double  v_prev_sum = 0.0;
234     long    lout,lin,ithr,nthr,vstep,vii ;
235 
236     /* -- CC initialize memory for power iteration */
237     /* -- v_new will hold the new vector as it is being calculated */
238     v_new = (double*)calloc(xvectim->nvec,sizeof(double));
239 
240     if( v_new == NULL )
241     {
242         WARNING_message("Cannot allocate %ld bytes for v_new",(long)(xvectim->nvec*sizeof(double)));
243         return( NULL );
244     }
245 
246     /*-- CC update our memory stats to reflect v_new -- */
247     INC_MEM_STATS(xvectim->nvec*sizeof(double), "v_new");
248     PRINT_MEM_STATS( "v_new" );
249 
250     /* -- v_prev will hold the vector from the previous calculation */
251     v_prev = (double*)calloc(xvectim->nvec,sizeof(double));
252 
253     if( v_prev == NULL )
254     {
255         if( v_new != NULL ) free(v_new);
256         WARNING_message("Cannot allocate %ld bytes for v_prev",(long)(xvectim->nvec*sizeof(double)));
257         return( NULL );
258     }
259 
260     /*-- CC update our memory stats to reflect v_prev -- */
261     INC_MEM_STATS(xvectim->nvec*sizeof(double), "v_prev");
262     PRINT_MEM_STATS( "v_prev" );
263 
264     /* -- xv_int is an intermediary that holds the X'v matrix-vector
265           product */
266     xv_int = (double*)calloc(xvectim->nvals,sizeof(double));
267 
268     if( xv_int == NULL )
269     {
270         if( v_new != NULL ) free(v_new);
271         if( v_prev != NULL ) free(v_prev);
272         WARNING_message("Cannot allocate %ld bytes for xv_int",
273             (long)(xvectim->nvals*sizeof(double)));
274         return( NULL );
275     }
276 
277     /*-- CC update our memory stats to reflect xv_int -- */
278     INC_MEM_STATS(xvectim->nvals*sizeof(double), "xv_int");
279     PRINT_MEM_STATS( "xv_int" );
280 
281 
282     /*--- Initialize power method ---*/
283 
284     /*  set the initial vector to the first vector */
285     for ( lout=0; lout < xvectim->nvec; lout++ )
286     {
287         /* ||v_prev|| = 1 */
288         v_prev[lout]=1.0 / sqrt((double)xvectim->nvec);
289         v_prev_sum += v_prev[lout];
290         v_prev_sum_sq += v_prev[lout] * v_prev[lout];
291     }
292 
293     /* Init error */
294     v_prev_norm = sqrt(v_prev_sum_sq);
295     v_err = v_prev_norm;
296 
297     power_it = 0;
298 
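    /* Each pass below computes
           xv_int = X' v_prev                      (nvals-long intermediate)
           v_new  = scale * ( X xv_int + shift * sum(v_prev) * 1 )
       i.e. one multiplication by the shifted/scaled correlation matrix,
       followed by normalization of v_new and an update of the convergence
       error ||v_prev - v_new|| / ||v_prev||. */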
299     while( (v_err > eps) && (power_it < max_iter))
300     {
301 
302         ithr = 0 ; nthr = 1 ;
303 
304         /* -- reset the xv_int to zeros --*/
305         bzero(xv_int,xvectim->nvals*sizeof(double));
306 
307         /* -- Calculate xv_int = X'v */
308         for( lout=0 ; lout < xvectim->nvec ; lout++ )
309         {  /*----- outer voxel loop -----*/
310 
311             /* get ref time series from this voxel */
312             xsar = VECTIM_PTR(xvectim,lout) ;
313 
314             for( lin=0; lin<xvectim->nvals; lin++ )
315             {
316                 xv_int[lin] += ((double)xsar[lin]*v_prev[lout]);
317             }
318 
319         } /* for lout */
320 
321         /* now calculate X xv_int */
322         for( lout=0 ; lout < xvectim->nvec ; lout++ )
323         {  /*----- outer voxel loop -----*/
324 
325             /* get ref time series from this voxel */
326             xsar = VECTIM_PTR(xvectim,lout) ;
327 
328             v_new[lout] = scale*shift*v_prev_sum ;
329 
330             for( lin=0; lin<xvectim->nvals; lin++ )
331             {
332                 v_new[lout] +=  scale*xv_int[lin]*xsar[lin];
333             }
334         }
335 
336         /* calculate the error, norms, and sums for the next
337            iteration */
338         v_prev_sum = 0.0;
339         v_new_norm = 0.0;
340         v_new_sum_sq = 0.0;
341         v_err = 0.0;
342 
343         /* calculate the norm of the new vector */
344         for( lout=0 ; lout < xvectim->nvec ; lout++ )
345         {  /*----- outer voxel loop -----*/
346             v_new_sum_sq += v_new[lout]*v_new[lout];
347         }
348         v_new_norm = sqrt(v_new_sum_sq);
349 
350         /* normalize the new vector, calculate the
351            error between this vector and the previous,
352            and get the sum */
353         v_new_sum_sq = 0.0;
354         for( lout=0; lout < xvectim->nvec; lout++ )
355         {
356             double vdiff = 0;
357             v_new[lout] = v_new[lout] / v_new_norm;
358             v_prev_sum += v_new[lout];
359             v_new_sum_sq += v_new[lout]*v_new[lout];
360 
361             /* calculate the differences */
362             vdiff = v_prev[lout] - v_new[lout];
363             v_err += (vdiff * vdiff);
364         }
365 
366         v_err = sqrt(v_err) / v_prev_norm;
367         v_prev_norm = sqrt(v_new_sum_sq);
368 
369         /* now set the new vec to the previous */
370         v_temp = v_prev;
371         v_prev = v_new;
372         v_new = v_temp;
373 
374         /* increment iteration counter */
375         power_it++;
376 
377         /* tell the user what has happened */
378         if((power_it % 10) == 0)
379             INFO_message ("Finished iter %ld: Verr %3.3f, Vnorm %3.3f\n",
380                 power_it, v_err, v_prev_norm);
381     }
382 
383     if ((v_err >= eps) && (power_it >= max_iter))
384     {
385         WARNING_message("Power iteration did not converge (%3.3f >= %3.3f)\n"
386             "in %ld iterations. You might consider increasing max_iter or\n"
387             "epsilon. For now we are writing out the obtained solution,\n"
388             "which may or may not be good enough.\n",
389             (v_err), (eps), (power_it));
390     }
391 
392     /* the eigenvector that we are interested in should now be in v_prev,
393        free all other power iteration temporary vectors */
394     if( v_new != NULL )
395     {
396         free(v_new);
397         v_new = NULL;
398 
399         /* update running memory statistics to reflect freeing v_new */
400         DEC_MEM_STATS(xvectim->nvec*sizeof(double), "v_new");
401     }
402 
403     if( xv_int != NULL )
404     {
405         free(xv_int);
406         xv_int = NULL;
407 
408         /* update running memory statistics to reflect freeing xv_int */
409         DEC_MEM_STATS(xvectim->nvals*sizeof(double), "xv_int");
410     }
411 
412     /* return the result */
413     return(v_prev);
414 }
415 
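/* calc_full_power_sparse: full power method (Lohmann et al. 2010) using a
   sparse array of the superthreshold (or top-sparsity%) correlations built
   by create_sparse_corr_array().  Each iteration applies the stored weights
   symmetrically (row and column), plus the implicit 1.0 diagonal, to the
   current vector.  Returns a newly allocated nvec-long eigenvector estimate,
   or NULL on failure. */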
416 double* calc_full_power_sparse(MRI_vectim *xvectim, double thresh,
417     double sparsity, double shift, double scale, double eps, long max_iter,
418     long binary, long mem_bytes)
419 {
420     /* CC - we need a few arrays for calculating the power method */
421     double* v_prev = NULL;
422     double* v_new = NULL;
423     double* v_temp = NULL;
424     double* weight_matrix = NULL;
425     long*   ndx_vec = NULL;
426     double  v_err = 0.0;
427     long    power_it;
428     double  v_new_sum_sq = 0.0;
429     double  v_new_norm = 0.0;
430     double  v_prev_sum_sq = 0.0;
431     double  v_prev_norm = 0.0;
432     double  v_prev_sum = 0.0;
433     long    ii = 0;
434     long    nvals = 0;
435     long    wsize = 0;
436 
437     /* sparse array for the weight vector */
438     sparse_array_head_node* sparse_array = NULL;
439     sparse_array_node* tptr;
440 
441     /* -- CC initialize memory for power iteration */
442     /* -- v_new will hold the new vector as it is being calculated */
443     v_new = (double*)calloc(xvectim->nvec,sizeof(double));
444 
445     if( v_new == NULL )
446     {
447         WARNING_message("Cannot allocate %ld bytes for v_new",
448            (long)(xvectim->nvec*sizeof(double)));
449         return( NULL );
450     }
451 
452     /*-- CC update our memory stats to reflect v_new -- */
453     INC_MEM_STATS(xvectim->nvec*sizeof(double), "v_new");
454     PRINT_MEM_STATS( "v_new" );
455 
456     /* -- v_prev will hold the vector from the previous calculation */
457     v_prev = (double*)calloc(xvectim->nvec,sizeof(double));
458 
459     if( v_prev == NULL )
460     {
461         if( v_new != NULL ) free(v_new);
462         WARNING_message("Cannot allocate %ld bytes for v_prev",
463             (long)(xvectim->nvec*sizeof(double)));
464         return( NULL );
465     }
466 
467     /*-- CC update our memory stats to reflect v_prev -- */
468     INC_MEM_STATS(xvectim->nvec*sizeof(double), "v_prev");
469     PRINT_MEM_STATS( "v_prev" );
470 
471 
472     /*---------- loop over mask voxels, correlate ----------*/
473 
474     /*  set the initial vector to the first vector */
475     for ( ii=0; ii < xvectim->nvec; ii++ )
476     {
477         v_prev[ii] = 1.0 / sqrt((double)xvectim->nvec);
478         v_prev_sum += v_prev[ii];
479         v_prev_sum_sq += v_prev[ii] * v_prev[ii];
480     }
481 
482     v_prev_norm = sqrt(v_prev_sum_sq);
483     v_err = v_prev_norm;
484 
485     /* get a sparse array */
486     sparse_array = create_sparse_corr_array(xvectim, sparsity, thresh,
487         cc_pearson_corr, (long)mem_bytes);
488     /* validate success in creating sparse array */
489     if( sparse_array == NULL )
490     {
491         if( v_new != NULL ) free(v_new);
492         if( v_prev != NULL ) free(v_prev);
493         WARNING_message("Error getting sparse weight array.");
494         return( NULL );
495     }
496 
497     /*-- CC update our memory stats to reflect the sparse array -- */
498     INC_MEM_STATS(sparse_array->peak_mem_usage, "sparse_array");
499     DEC_MEM_STATS(sparse_array->peak_mem_usage-(sizeof(sparse_array_head_node)+
500         sparse_array->num_nodes*sizeof(sparse_array_node)), "sparse_array");
501 
502     /* power iterator */
503     power_it = 0;
504 
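    /* Each pass applies the sparse weight matrix to v_prev: the diagonal
       contribution scale*(1.0+shift)*v_prev[ii] is added explicitly for
       every voxel, then each stored node (row, column, weight) contributes
       to both v_new[row] and v_new[column], since a correlation pair appears
       only once in the sparse array. */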
505     while( (v_err > eps) && (power_it < max_iter) )
506     {
507 
508         /* zero out the new vector */
509         bzero(v_new, xvectim->nvec*sizeof(double));
510 
511         /* reset the number of superthreshold values to 0 */
512         nvals = 0;
513 
514         /* begin by adding in the correlation for the diagonal element
515            (i.e. 1.0) */
516         for( ii = 0; ii < xvectim->nvec; ii++ )
517         {
518             v_new[ii] += scale*(1.0+shift)*v_prev[ii];
519         }
520 
521         /* iterate through sparse array and calculate the matrix product */
522         tptr = sparse_array->nodes;
523         while( tptr != NULL )
524         {
525             if( binary == 1 )
526             {
527                 v_new[tptr->row] += scale*(1.0+shift)*v_prev[tptr->column];
528                 v_new[tptr->column] += scale*(1.0+shift)*v_prev[tptr->row];
529             }
530             else
531             {
532                 v_new[tptr->row] += scale*(tptr->weight+shift)*
533                     v_prev[tptr->column];
534                 v_new[tptr->column] += scale*(tptr->weight+shift)*
535                     v_prev[tptr->row];
536             }
537             tptr = tptr->next;
538         }
539 
540         /* calculate the error, norms, and sums for the next
541            iteration */
542         v_prev_sum = 0.0;
543         v_new_norm = 0.0;
544         v_new_sum_sq = 0.0;
545         v_err = 0.0;
546 
547         /* calculate the norm of the new vector */
548         for( ii=0 ; ii < xvectim->nvec ; ii++ )
549         {  /*----- outer voxel loop -----*/
550             v_new_sum_sq += v_new[ii]*v_new[ii];
551         }
552         v_new_norm = sqrt(v_new_sum_sq);
553 
554         /* normalize the new vector, calculate the
555            error between this vector and the previous,
556            and get the sum */
557         v_new_sum_sq = 0.0;
558         for( ii=0; ii < xvectim->nvec; ii++ )
559         {
560             double vdiff = 0;
561             v_new[ii] = v_new[ii] / v_new_norm;
562             v_new_sum_sq += v_new[ii]*v_new[ii];
563 
564             /* calculate the differences */
565             vdiff = v_prev[ii] - v_new[ii];
566             v_err += (vdiff * vdiff);
567         }
568 
569         v_err = sqrt(v_err) / v_prev_norm;
570         v_prev_norm = sqrt(v_new_sum_sq);
571 
572         /* now set the new vec to the previous */
573         v_temp = v_prev;
574         v_prev = v_new;
575         v_new = v_temp;
576 
577         /* increment iteration counter */
578         power_it++;
579 
580         /* tell the user what has happened */
581         if((power_it % 10) == 0)
582             INFO_message ("Finished iter %ld: Verr %3.3f, Vnorm %3.3f\n",
583                 power_it, v_err, v_prev_norm);
584     }
585 
586     if ((v_err >= eps) && (power_it >= max_iter))
587     {
588         WARNING_message("Power iteration did not converge (%3.3f >= %3.3f)\n"
589             "in %ld iterations. You might consider increasing max_iter or\n"
590             "epsilon. For now we are writing out the obtained solution,\n"
591             "which may or may not be good enough.\n",
592             (v_err), (eps), (power_it));
593     }
594 
595     /* the eigenvector that we are interested in should now be in v_prev,
596        free all other power iteration temporary vectors */
597     if( v_new != NULL )
598     {
599         free(v_new);
600         v_new = NULL;
601 
602         /* update running memory statistics to reflect freeing v_new */
603         DEC_MEM_STATS(xvectim->nvec*sizeof(double), "v_new");
604     }
605 
606     /* free the weight matrix */
607     if( sparse_array != NULL )
608     {
609         /* update running memory statistics to reflect freeing the sparse array */
610         DEC_MEM_STATS(sizeof(sparse_array_head_node)+sparse_array->num_nodes*
611             sizeof(sparse_array_node), "sparse_array");
612 
613         sparse_array = free_sparse_array(sparse_array);
614     }
615 
616     PRINT_MEM_STATS( "return the result" );
617 
618     /* return the result */
619     return(v_prev);
620 }
621 
622 
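/* calc_full_power_max_mem: full power method that caches the thresholded
   correlations in a packed in-memory array (weight_matrix).  The matrix is
   built during the first power iteration; subsequent iterations reuse the
   cached weights instead of recomputing correlations.  Returns a newly
   allocated nvec-long eigenvector estimate, or NULL on failure. */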
623 double* calc_full_power_max_mem(MRI_vectim *xvectim, double thresh, double shift,
624      double scale, double eps, long max_iter, long binary)
625 {
626     /* CC - we need a few arrays for calculating the power method */
627     double* v_prev = NULL;
628     double* v_new = NULL;
629     double* v_temp = NULL;
630     double* weight_matrix = NULL;
631     long*   ndx_vec = NULL;
632     double  v_err = 0.0;
633     long    power_it;
634     double  v_new_sum_sq = 0.0;
635     double  v_new_norm = 0.0;
636     double  v_prev_sum_sq = 0.0;
637     double  v_prev_norm = 0.0;
638     double  v_prev_sum = 0.0;
639     long    ii = 0;
640     long    nvals = 0;
641     long    wsize = 0;
642 
643     /* -- CC initialize memory for power iteration */
644     /* -- v_new will hold the new vector as it is being calculated */
645     v_new = (double*)calloc(xvectim->nvec,sizeof(double));
646 
647     if( v_new == NULL )
648     {
649         WARNING_message("Cannot allocate %ld bytes for v_new",(long)(xvectim->nvec*sizeof(double)));
650         return( NULL );
651     }
652 
653     /*-- CC update our memory stats to reflect v_new -- */
654     INC_MEM_STATS(xvectim->nvec*sizeof(double), "v_new");
655     PRINT_MEM_STATS( "v_new" );
656 
657     /* -- v_prev will hold the vector from the previous calculation */
658     v_prev = (double*)calloc(xvectim->nvec,sizeof(double));
659 
660     if( v_prev == NULL )
661     {
662         if( v_new != NULL ) free(v_new);
663         WARNING_message("Cannot allocate %ld bytes for v_prev",(long)(xvectim->nvec*sizeof(double)));
664         return( NULL );
665     }
666 
667     /*-- CC update our memory stats to reflect v_prev -- */
668     INC_MEM_STATS(xvectim->nvec*sizeof(double), "v_prev");
669     PRINT_MEM_STATS( "v_prev" );
670 
671 
672     /* we will use another array of row offsets into the packed weight matrix
673        so that we only need to store one triangle of the symmetric array */
674     ndx_vec = (long*)malloc(xvectim->nvec*sizeof(long));
675 
676     if( ndx_vec == NULL )
677     {
678         if( v_new != NULL ){ free(v_new); v_new=NULL;}
679         if( v_prev != NULL ){ free(v_prev); v_prev=NULL;}
680         WARNING_message("Cannot allocate %ld bytes for ndx_vec",(long)(xvectim->nvec*sizeof(long)));
681         return( NULL );
682     }
683 
684     /*-- CC update our memory stats to reflect ndx_vec -- */
685     INC_MEM_STATS(xvectim->nvec*sizeof(long), "ndx_vec");
686     PRINT_MEM_STATS( "ndx_vec" );
687 
688     /* also use this opportunity to set the ndx_vec */
689     /*ndx_vec[0] = xvectim->nvec - 1;*/
690     ndx_vec[0] = 0;
691     for( ii=1; ii<xvectim->nvec; ii++ )
692     {
693         ndx_vec[ii] = ndx_vec[ii-1]+xvectim->nvec-(ii+1);
694     }
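    /* ndx_vec[] is a table of row offsets into the packed one-dimensional
       weight_matrix: the weight for the pair (thr_lout, thr_lin), with
       thr_lin < thr_lout, is looked up at
           ndx_vec[ xvectim->nvec - (thr_lout+1) ] + thr_lin
       so only one triangle of the symmetric correlation matrix is stored. */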
695 
696     /*---------- loop over mask voxels, correlate ----------*/
697 
698     /*  set the initial vector to the first vector */
699     for ( ii=0; ii<xvectim->nvec; ii++ )
700     {
701         v_prev[ii]=1.0 / sqrt((double)xvectim->nvec);
702         v_prev_sum += v_prev[ii];
703         v_prev_sum_sq += v_prev[ii] * v_prev[ii];
704     }
705 
706     v_prev_norm = sqrt(v_prev_sum_sq);
707     v_err = v_prev_norm;
708 
709     /* set the size of the packed weight matrix; the diagonal (1.0) is not stored, it is added in directly during the iteration */
710     wsize = 0.5*(xvectim->nvec-1)*(xvectim->nvec);
711 
712     power_it = 0;
713 
714     while( (v_err > eps) && (power_it < max_iter) )
715     {
716 
717         /* zero out the new vector */
718         bzero(v_new, xvectim->nvec*sizeof(double));
719 
720         /* reset the number of superthreshold values to 0 */
721         nvals = 0;
722 
723         if ( weight_matrix == NULL )
724         {
725 
726             /* Allocate memory for the matrix */
727             weight_matrix = (double*)calloc(wsize,sizeof(double));
728 
729             if( weight_matrix == NULL )
730             {
731                 if( v_new != NULL ) free(v_new);
732                 if( v_prev != NULL ) free(v_prev);
733                 if( ndx_vec != NULL ) free(ndx_vec);
734                 WARNING_message("Cannot allocate %ld bytes for weight_mat",(long)(wsize*sizeof(double)));
735                 return( NULL );
736             }
737 
738             /*-- CC update our memory stats to reflect weight_matrix -- */
739             INC_MEM_STATS(wsize*sizeof(double), "weight_mat");
740             PRINT_MEM_STATS( "weight_mat" );
741 
742             AFNI_OMP_START ;
743 #pragma omp parallel if( xvectim->nvec > 999 )
744             {
745 
746                 long thr_lout,thr_lin,ithr,nthr,vstep,vii,vndx ;
747                 double car = 0.0;
748                 double max_val = -10.0;
749                 float *xsar = NULL;
750                 float *ysar = NULL;
751 
752                 /*-- get information about who we are --*/
753 #ifdef USE_OMP
754                 ithr = omp_get_thread_num() ;
755                 nthr = omp_get_num_threads() ;
756                 if( ithr == 0 ) INFO_message("%d OpenMP threads started",nthr) ;
757 #else
758                 ithr = 0 ; nthr = 1 ;
759 #endif
760 
761                 vstep = (int)( xvectim->nvec / (nthr*50.0f) + 0.901f ); vii = 0;
762                 if((MEM_STAT==0) && (ithr==0)) fprintf(stderr,"Looping:");
763 
764                 /* iterate through and build the correlation matrix; this can be done at
765                    the same time as the first iteration of the power method, so let's
766                    do it! */
767 #pragma omp for schedule(static, 1)
768                 for( thr_lout=(xvectim->nvec-1) ; thr_lout >= 0 ; thr_lout-- )
769                 {  /*----- outer voxel loop -----*/
770 
771                     if( ithr == 0 && vstep > 2 )
772                     {
773                         vii++;
774                         if( vii%vstep == vstep/2 && MEM_STAT == 0)
775                         {
776                             vstep_print();
777                         }
778                     }
779 
780                     /* begin by adding in the correlation for the diagonal element
781                        (i.e. 1.0) */
782 #pragma omp critical(dataupdate)
783                     {
784                         v_new[thr_lout] += scale*(1.0+shift)*v_prev[thr_lout];
785                     }
786 
787                     /* get ref time series from this voxel */
788                     xsar = VECTIM_PTR(xvectim,thr_lout) ;
789 
790                     /* iterate over the inner voxels */
791                     for( thr_lin=0; thr_lin<thr_lout; thr_lin++ )
792                     {
793                         /* extract the time series for the target voxel */
794                         ysar = VECTIM_PTR(xvectim,thr_lin) ;
795 
796                         /* calculate the correlation coefficient, and transform it */
797                         car = (double)cc_pearson_corr(xvectim->nvals,xsar,ysar);
798 
799                         /* only need to add in non-zero values, this hopefully will
800                            reduce some of the competition for the critical section */
801                         if( car >= thresh )
802                         {
803                             /* use a critical section to make sure that the values
804                                do not get corrupted */
805 #pragma omp critical(dataupdate)
806                             {
807                                 nvals = nvals+1;
808 
809                                 if( binary == 1 )
810                                 {
811                                     v_new[thr_lout] += scale*(1.0+shift)*v_prev[thr_lin];
812                                     v_new[thr_lin] += scale*(1.0+shift)*v_prev[thr_lout];
813                                 }
814                                 else
815                                 {
816                                     v_new[thr_lout] += scale*(car+shift)*v_prev[thr_lin];
817                                     v_new[thr_lin] += scale*(car+shift)*v_prev[thr_lout];
818                                 }
819                             }
820 
821                             /* incorporate the correlation in the weight_matrix, since
822                                each value is only calculated once, we don't need a
823                                critical sections */
824                             vndx = ndx_vec[(xvectim->nvec-(thr_lout+1))] + thr_lin;
825                             if(( vndx >= 0 ) && ( vndx < wsize ))
826                             {
827                                 if( binary == 1 )
828                                 {
829                                     weight_matrix[ vndx ]=scale*(1.0+shift);
830                                 }
831                                 else
832                                 {
833                                     weight_matrix[ vndx ]=scale*(car+shift);
834                                 }
835                             }
836                             else
837                             {
838                                 fprintf(stderr, "Index A (%ld >= %ld) out of bounds %ld %ld\n", vndx,
839                                     wsize, thr_lout, thr_lin);
840                             }
841 
842                         }
843                     }
844                 } /* for lout */
845             } /* AFNI_OMP */
846             AFNI_OMP_END;
847 
848             /* end the looping print */
849             fprintf(stderr, "\n");
850         } /* end if weight_matrix == NULL */
851         else
852         {
853             /* now use precomputed matrix */
854             AFNI_OMP_START ;
855 #pragma omp parallel if( xvectim->nvec > 999 )
856             {
857 
858                 long thrb_lout,thrb_lin,ithr,nthr,vstep,vii,vndx ;
859                 double car = 0.0;
860                 float *xsar = NULL;
861                 float *ysar = NULL;
862 
863                 /*-- get information about who we are --*/
864 #ifdef USE_OMP
865                 ithr = omp_get_thread_num() ;
866                 nthr = omp_get_num_threads() ;
867                 if( ithr == 0 ) INFO_message("%d OpenMP threads started",nthr) ;
868 #else
869                 ithr = 0 ; nthr = 1 ;
870 #endif
871 
872                 vstep = (int)( xvectim->nvec / (nthr*50.0f) + 0.901f ); vii = 0;
873                 if((MEM_STAT==0) && (ithr==0)) fprintf(stderr,"Looping:");
874 
875                 /* the correlation matrix was cached on the first pass, so
876                    reuse the precomputed weights rather than recomputing
877                    correlations on this iteration */
878 #pragma omp for schedule(static, 1)
879                 for( thrb_lout=(xvectim->nvec-1) ; thrb_lout >= 0 ; thrb_lout-- )
880                 {  /*----- outer voxel loop -----*/
881 
882                     if( ithr == 0 && vstep > 2 )
883                     { vii++; if( vii%vstep == vstep/2 && MEM_STAT == 0) vstep_print(); }
884 
885                     /* begin by adding in the correlation for the diagonal element
886                        (i.e. 1.0) */
887 #pragma omp critical(dataupdate)
888                     {
889                         v_new[thrb_lout] += scale*(1.0+shift)*v_prev[thrb_lout];
890                     }
891 
892                     /* iterate over the inner voxels */
893                     /*for( thrb_lin=(thrb_lout+1); thrb_lin<xvectim->nvec; thrb_lin++ )*/
894                     for( thrb_lin=0; thrb_lin<thrb_lout; thrb_lin++ )
895                     {
896 
897                         /* get the correlation */
898                         vndx = ndx_vec[(xvectim->nvec-(thrb_lout+1))] + thrb_lin;
899                         if((vndx >= 0 ) && (vndx < wsize ))
900                         {
901                             car = weight_matrix[vndx];
902                         }
903                         else
904                         {
905                             fprintf(stderr, "Index B (%ld > %ld) out of bounds %ld %ld\n", vndx,
906                                 wsize,thrb_lout, thrb_lin);
907                         }
908 
909                         /* only need to add in non-zero values, this hopefully will
910                            reduce some of the competition for the critical section */
911                         if( car != 0.0 )
912                         {
913                             /* use a critical section to make sure that the values
914                                do not get corrupted */
915 #pragma omp critical(dataupdate)
916                             {
917                                 nvals=nvals+1;
918                                 v_new[thrb_lout] += car*v_prev[thrb_lin];
919                                 v_new[thrb_lin] += car*v_prev[thrb_lout];
920                             }
921                         }
922                     }
923                 } /* for thrb_lout */
924             } /* AFNI_OMP */
925             AFNI_OMP_END;
926 
927             /* end the looping print */
928             fprintf(stderr, "\n");
929         }
930 
931         /* if none of the correlations were above threshold, stop and error */
932         if( nvals == 0 ) break;
933 
934         /* calculate the error, norms, and sums for the next
935            iteration */
936         v_prev_sum = 0.0;
937         v_new_norm = 0.0;
938         v_new_sum_sq = 0.0;
939         v_err = 0.0;
940 
941         /* calculate the norm of the new vector */
942         for( ii=0 ; ii < xvectim->nvec ; ii++ )
943         {  /*----- outer voxel loop -----*/
944             v_new_sum_sq += v_new[ii]*v_new[ii];
945         }
946         v_new_norm = sqrt(v_new_sum_sq);
947 
948         /* normalize the new vector, calculate the
949            error between this vector and the previous,
950            and get the sum */
951         v_new_sum_sq = 0.0;
952         for( ii=0; ii < xvectim->nvec; ii++ )
953         {
954             double vdiff = 0;
955             v_new[ii] = v_new[ii] / v_new_norm;
956             v_new_sum_sq += v_new[ii]*v_new[ii];
957 
958             /* calculate the differences */
959             vdiff = v_prev[ii] - v_new[ii];
960             v_err += (vdiff * vdiff);
961         }
962 
963         v_err = sqrt(v_err) / v_prev_norm;
964         v_prev_norm = sqrt(v_new_sum_sq);
965 
966         /* now set the new vec to the previous */
967         v_temp = v_prev;
968         v_prev = v_new;
969         v_new = v_temp;
970 
971         /* increment iteration counter */
972         power_it++;
973 
974         /* tell the user what has happened */
975         if ((power_it % 10) == 0)
976             INFO_message ("Finished iter %ld: Verr %3.3f, Vnorm %3.3f\n",
977                 power_it, v_err, v_prev_norm);
978     }
979 
980     if ( nvals == 0 )
981     {
982         WARNING_message("None of the correlations were above the threshold, "
983             "the output will be empty\n");
984     }
985     else
986     {
987         INFO_message ("Found %ld values above the threshold (>= %lf).",nvals,thresh);
988 
989         if ((v_err >= eps) && (power_it >= max_iter))
990         {
991             WARNING_message("Power iteration did not converge (%3.3f >= %3.3f)\n"
992                 "in %ld iterations. You might consider increasing max_iter or\n"
993                 "epsilon. For now we are writing out the obtained solution,\n"
994                 "which may or may not be good enough.\n",
995                 (v_err), (eps), (power_it));
996         }
997     }
998 
999     /* the eigenvector that we are interested in should now be in v_prev,
1000        free all other power iteration temporary vectors */
1001     if( v_new != NULL )
1002     {
1003         free(v_new);
1004         v_new = NULL;
1005 
1006         /* update running memory statistics to reflect freeing v_new */
1007         DEC_MEM_STATS(xvectim->nvec*sizeof(double), "v_new");
1008     }
1009 
1010     /* free the weight matrix */
1011     if( weight_matrix != NULL )
1012     {
1013         free(weight_matrix);
1014         weight_matrix = NULL;
1015 
1016         /* update running memory statistics to reflect freeing the weight matrix */
1017         DEC_MEM_STATS(wsize*sizeof(double), "weight_mat");
1018     }
1019 
1020     /* free the ndx vector */
1021     if( ndx_vec != NULL )
1022     {
1023         free(ndx_vec);
1024         ndx_vec = NULL;
1025 
1026         /* update running memory statistics to reflect freeing ndx_vec */
1027         DEC_MEM_STATS(xvectim->nvec*sizeof(long), "ndx_vec");
1028     }
1029 
1030 
1031     /* return the result */
1032     return(v_prev);
1033 }
1034 
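/* calc_full_power_min_mem: full power method with minimal memory use; no
   correlations are cached, so every pairwise correlation is recomputed on
   every power iteration.  This is the slowest variant, but working memory
   stays at two nvec-long vectors.  Returns a newly allocated nvec-long
   eigenvector estimate, or NULL on failure. */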
1035 double* calc_full_power_min_mem(MRI_vectim *xvectim, double thresh, double shift, double scale, double eps, long max_iter, long binary)
1036 {
1037     /* CC - we need a few arrays for calculating the power method */
1038     double* v_prev = NULL;
1039     double* v_new = NULL;
1040     double* v_temp = NULL;
1041     double  v_err = 0.0;
1042     long    power_it;
1043     double  v_new_sum_sq = 0.0;
1044     double  v_new_norm = 0.0;
1045     double  v_prev_sum_sq = 0.0;
1046     double  v_prev_norm = 0.0;
1047     double  v_prev_sum = 0.0;
1048     long    ii = 0;
1049     long    nvals = 0;
1050 
1051     /* -- CC initialize memory for power iteration */
1052     /* -- v_new will hold the new vector as it is being calculated */
1053     v_new = (double*)calloc(xvectim->nvec,sizeof(double));
1054 
1055     if( v_new == NULL )
1056     {
1057         WARNING_message("Cannot allocate %ld bytes for v_new",(long)(xvectim->nvec*sizeof(double)));
1058         return( NULL );
1059     }
1060 
1061     /*-- CC update our memory stats to reflect v_new -- */
1062     INC_MEM_STATS(xvectim->nvec*sizeof(double), "v_new");
1063     PRINT_MEM_STATS( "v_new" );
1064 
1065     /* -- v_prev will hold the vector from the previous calculation */
1066     v_prev = (double*)calloc(xvectim->nvec,sizeof(double));
1067 
1068     if( v_prev == NULL )
1069     {
1070         if( v_new != NULL ) free(v_new);
1071         WARNING_message("Cannot allocate %ld bytes for v_prev",(long)(xvectim->nvec*sizeof(double)));
1072         return( NULL );
1073     }
1074 
1075     /*-- CC update our memory stats to reflect v_prev -- */
1076     INC_MEM_STATS(xvectim->nvec*sizeof(double), "v_prev");
1077     PRINT_MEM_STATS( "v_prev" );
1078 
1079     /*---------- loop over mask voxels, correlate ----------*/
1080 
1081     /*  set the initial vector to the first vector */
1082     for ( ii=0; ii<xvectim->nvec; ii++ )
1083     {
1084         v_prev[ii]=1.0 / sqrt((double)xvectim->nvec);
1085         v_prev_sum += v_prev[ii];
1086         v_prev_sum_sq += v_prev[ii] * v_prev[ii];
1087     }
1088 
1089     v_prev_norm = sqrt(v_prev_sum_sq);
1090     v_err = v_prev_norm;
1091 
1092     power_it = 0;
1093 
1094     while( (v_err > eps) && (power_it < max_iter) )
1095     {
1096 
1097         /* zero out the new vector */
1098         bzero(v_new, xvectim->nvec*sizeof(double));
1099 
1100         /* reset the number of superthreshold values to 0 */
1101         nvals = 0;
1102 
1103         AFNI_OMP_START ;
1104 #pragma omp parallel if( xvectim->nvec > 999 )
1105         {
1106 
1107             int lout,lin,ithr,nthr,vstep,vii ;
1108             double car = 0.0;
1109             float *xsar = NULL;
1110             float *ysar = NULL;
1111 
1112             /*-- get information about who we are --*/
1113 #ifdef USE_OMP
1114             ithr = omp_get_thread_num() ;
1115             nthr = omp_get_num_threads() ;
1116             if( ithr == 0 ) INFO_message("%d OpenMP threads started",nthr) ;
1117 #else
1118             ithr = 0 ; nthr = 1 ;
1119 #endif
1120 
1121 
1122             vstep = (int)( xvectim->nvec / (nthr*50.0f) + 0.901f ); vii = 0;
1123             if((MEM_STAT==0) && (ithr==0)) fprintf(stderr,"Looping:");
1124 
1125             /* calculate the power method */
1126 #pragma omp for schedule(static, 1)
1127             for( lout=0 ; lout < xvectim->nvec ; lout++ )
1128             {  /*----- outer voxel loop -----*/
1129 
1130                 if( ithr == 0 && vstep > 2 )
1131                 { vii++; if( vii%vstep == vstep/2 && MEM_STAT == 0) vstep_print(); }
1132 
1133                 /* get ref time series from this voxel */
1134                 xsar = VECTIM_PTR(xvectim,lout) ;
1135 
1136                 /* iterate over the inner voxels */
1137                 for( lin=lout; lin<xvectim->nvec; lin++ )
1138                 {
1139                     if(lin == lout)
1140                     {
1141                         car = 1.0;
1142                     }
1143                     else
1144                     {
1145                         /* extract the time series for the target voxel */
1146                         ysar = VECTIM_PTR(xvectim,lin) ;
1147 
1148                         /* calculate the correlation coefficient */
1149                         car = (double)cc_pearson_corr(xvectim->nvals,xsar,ysar);
1150                     }
1151 
1152                     /* if above threshold then add in the value */
1153                     if( car >= thresh )
1154                     {
1155                         /* use a critical section to make sure that the values
1156                            do not get corrupted */
1157 #pragma omp critical(dataupdate)
1158                         {
1159                             nvals = nvals+1;
1160                             if( binary == 1 )
1161                             {
1162                                 v_new[lout] += scale*(1.0+shift)*v_prev[lin];
1163                                 v_new[lin] += scale*(1.0+shift)*v_prev[lout];
1164                             }
1165                             else
1166                             {
1167                                 v_new[lout] += scale*(car+shift)*v_prev[lin];
1168                                 v_new[lin] += scale*(car+shift)*v_prev[lout];
1169                             }
1170                         }
1171                     }
1172                 }
1173             } /* for lout */
1174         } /* AFNI_OMP */
1175         AFNI_OMP_END;
1176 
1177         /* end the looping print */
1178         fprintf(stderr, "\n");
1179 
1180 
1181         /* if none of the correlations were above threshold, stop and error */
1182         if( nvals == 0 ) break;
1183 
1184         /* calculate the error, norms, and sums for the next
1185            iteration */
1186         v_prev_sum = 0.0;
1187         v_new_norm = 0.0;
1188         v_new_sum_sq = 0.0;
1189         v_err = 0.0;
1190 
1191         /* calculate the norm of the new vector */
1192         for( ii=0 ; ii < xvectim->nvec ; ii++ )
1193         {  /*----- outer voxel loop -----*/
1194             v_new_sum_sq += v_new[ii]*v_new[ii];
1195         }
1196         v_new_norm = sqrt(v_new_sum_sq);
1197 
1198         /* normalize the new vector, calculate the
1199            error between this vector and the previous,
1200            and get the sum */
1201         v_new_sum_sq = 0.0;
1202         for( ii=0; ii < xvectim->nvec; ii++ )
1203         {
1204             double vdiff = 0;
1205             v_new[ii] = v_new[ii] / v_new_norm;
1206             v_new_sum_sq += v_new[ii]*v_new[ii];
1207 
1208             /* calculate the differences */
1209             vdiff = v_prev[ii] - v_new[ii];
1210             v_err += (vdiff * vdiff);
1211         }
1212 
1213         v_err = sqrt(v_err) / v_prev_norm;
1214         v_prev_norm = sqrt(v_new_sum_sq);
1215 
1216         /* now set the new vec to the previous */
1217         v_temp = v_prev;
1218         v_prev = v_new;
1219         v_new = v_temp;
1220 
1221         /* increment iteration counter */
1222         power_it++;
1223 
1224         /* tell the user what has happened */
1225         if ((power_it % 10) == 0 )
1226             INFO_message ("Finished iter %ld: Verr %3.3f, Vnorm %3.3f\n",
1227                 power_it, v_err, v_prev_norm);
1228     }
1229 
1230     if ( nvals == 0 )
1231     {
1232         WARNING_message("None of the correlations were above the threshold, "
1233             "the output will be empty\n");
1234     }
1235     else if ((v_err >= eps) && (power_it >= max_iter))
1236     {
1237         WARNING_message("Power iteration did not converge (%3.3f >= %3.3f)\n"
1238             "in %ld iterations. You might consider increasing max_iter or\n"
1239             "epsilon. For now we are writing out the obtained solution,\n"
1240             "which may or may not be good enough.\n",
1241             (v_err), (eps), (power_it));
1242     }
1243 
1244     /* the eigenvector that we are interested in should now be in v_prev,
1245        free all other power iteration temporary vectors */
1246     if( v_new != NULL )
1247     {
1248         free(v_new);
1249         v_new = NULL;
1250 
1251         /* update running memory statistics to reflect freeing v_new */
1252         DEC_MEM_STATS(xvectim->nvec*sizeof(double), "v_new");
1253     }
1254 
1255     /* return the result */
1256     return(v_prev);
1257 }
1258 
1259 /* 3dECM was created from 3dAutoTCorrelate by
1260    R. Cameron Craddock */
1261 
1262 /*----------------------------------------------------------------*/
1263 /* General correlation calculation. */
1264 
1265 #if 0
1266 float my_THD_pearson_corr( int n, float *x , float *y )
1267 {
1268    float xv,yv,xy , vv,ww , xm,ym ;
1269    register int ii ;
1270 
1271    xm = ym = 0.0f ;
1272    for( ii=0 ; ii < n ; ii++ ){ xm += x[ii] ; ym += y[ii] ; }
1273    xm /= n ; ym /= n ;
1274    xv = yv = xy = 0.0f ;
1275    for( ii=0 ; ii < n ; ii++ ){
1276      vv = x[ii]-xm ; ww = y[ii]-ym ; xv += vv*vv ; yv += ww*ww ; xy += vv*ww ;
1277    }
1278 
1279    if( xv <= 0.0f || yv <= 0.0f ) return 0.0f ;
1280    return xy/sqrtf(xv*yv) ;
1281 }
1282 #endif
1283 
1284 /*----------------------------------------------------------------*/
1285 /*! eta^2 (Cohen, NeuroImage 2008)              25 Jun 2010 [rickr]
1286  *
1287  *  eta^2 = 1 -  SUM[ (a_i - m_i)^2 + (b_i - m_i)^2 ]
1288  *               ------------------------------------
1289  *               SUM[ (a_i - M  )^2 + (b_i - M  )^2 ]
1290  *
1291  *  where  o  a_i and b_i are the vector elements
1292  *         o  m_i = (a_i + b_i)/2
1293  *         o  M = mean across both vectors
1294  -----------------------------------------------------------------*/
1295 
1296 float my_THD_eta_squared( int n, float *x , float *y )
1297 {
1298    float num , denom , gm , lm, vv, ww;
1299    register int ii ;
1300 
1301    gm = 0.0f ;
1302    for( ii=0 ; ii < n ; ii++ ){ gm += x[ii] + y[ii] ; }
1303    gm /= (2.0f*n) ;
1304 
1305    num = denom = 0.0f ;
1306    for( ii=0 ; ii < n ; ii++ ){
1307      lm = 0.5f * ( x[ii] + y[ii] ) ;
1308      vv = (x[ii]-lm); ww = (y[ii]-lm); num   += ( vv*vv + ww*ww );
1309      vv = (x[ii]-gm); ww = (y[ii]-gm); denom += ( vv*vv + ww*ww );
1310    }
1311 
1312    if( num < 0.0f || denom <= 0.0f || num >= denom ) return 0.0f ;
1313    return (1.0f - num/denom) ;
1314 }
1315 
1316 /*-----------------------------------------------------------------------------*/
1317 
1318 /*----------------------------------------------------------------------------*/
1319 
1320 int main( int argc , char *argv[] )
1321 {
1322     THD_3dim_dataset *xset = NULL;
1323     THD_3dim_dataset *cset = NULL;
1324     THD_3dim_dataset *mset = NULL ;
1325     int nopt=1 , method=PEARSON , do_autoclip=0 ;
1326     int nvox , nvals , ii, polort=1 ;
1327     char *prefix = "ECM" ;
1328     byte *mask=NULL;
1329     int   nmask , abuc=1 ;
1330     char str[32] , *cpt = NULL;
1331     int *mask_ndx_to_vol_ndx = NULL;
1332     MRI_vectim *xvectim = NULL ;
1333 
1334     /* CC - flags that control options */
1335     double  scale = 1.0;
1336     double  shift = 0.0;
1337     double  thresh = -1.2;
1338     double  sparsity = 100.0;
1339     long    mem_bytes = (long)2147483648;
1340 
1341     /* CC - flags to control behaviour */
1342     long do_scale = 0;
1343     long do_shift = 0;
1344     long do_sparsity = 0;
1345     long do_thresh = 0;
1346     long do_binary = 0;
1347     long do_full = 0;
1348     long do_fecm = 0;
1349 
1350     /* CC - iteration stopping criteria */
1351     long max_iter = 10000;
1352     double eps = 0.00001;
1353 
1354     /* CC - we will have two subbricks: binarized and weighted */
1355     int nsubbriks = 1;
1356     int subbrik = 0;
1357     float * wodset;
1358 
1359      /* CC - vectors to hold the results (bin/wght) */
1360     double* eigen_vec = NULL;
1361 
1362     /* CC - number of voxels with zero variance that were
1363        masked out */
1364     int rem_count = 0;
1365 
1366    /*----*/
1367 
1368    AFNI_SETUP_OMP(0) ;  /* 24 Jun 2013 */
1369 
1370    if( argc < 2 || strcmp(argv[1],"-help") == 0 || strcmp(argv[1],"-h") == 0 ){
1371       printf(
1372 "Usage: 3dECM [options] dset\n"
1373 "  Computes voxelwise eigenvector centrality (ECM) and\n"
1374 "  stores the result in a new 3D bucket dataset as floats to\n"
1375 "  preserve their values. ECM of a voxel reflects the strength\n"
1376 "  and extent of a voxel's global connectivity as well as the\n"
1377 "  importance of the voxels that it is directly connected to.\n\n"
1378 "  Conceptually the process involves: \n"
1379 "      1. Calculating the correlation between voxel time series for\n"
1380 "         every pair of voxels in the brain (as determined by masking)\n"
1381 "      2. Calculate the eigenvector corresponding to the largest\n"
1382 "         eigenvalue of the similarity matrix.\n\n"
1383 "  Guaranteeing that the largest eigenvector is unique, and therefore\n"
1384 "  that an ECM solution exists, requires that the similarity matrix\n"
1385 "  is strictly positive. This is enforced by either adding one to\n"
1386 "  the correlations as in (Lohmann et. al. 2010), or by adding one\n"
1387 "  and dividing by two (Wink et al. 2012). \n\n"
1388 "  Calculating the first eigenvector of a whole-brain similarity matrix\n"
1389 "  requires a lot of system memory and time. 3dECM uses the optimizations\n"
1390 "  described in (Wink et al 2012) to improve performance. It additionally\n"
1391 "  provides a mechanism for limiting the amount of system memory used to\n"
1392 "  avoid memory-related crashes.\n\n"
1393 "  The performance can also be improved by reducing the number of\n"
1394 "  connections in the similarity matrix using either a correlation\n"
1395 "  or sparsity threshold. The correlation threshold simply removes\n"
1396 "  all connections with a correlation less than the threshold. The\n"
1397 "  sparsity threshold is a percentage and reflects the fraction of\n"
1398 "  the strongest connections that should be retained for analysis.\n"
1399 "  Sparsity thresholding uses a histogram approach to 'learn' a \n"
1400 "  correlation threshold that would result in the desired level \n"
1401 "  of sparsity. Due to ties, and to virtual ties caused by limited\n"
1402 "  precision for differentiating connections, the desired level of\n"
1403 "  sparsity will not be met exactly; 3dECM will retain more connections\n"
1404 "  than requested.\n\n"
1405 "  Whole brain ECM results in very small voxel values and small\n"
1406 "  differences between cortical areas. Reducing the number of\n"
1407 "  connections in the analysis improves the voxel values and \n"
1408 "  provides greater contrast between cortical areas.\n\n"
1409 "  Lohmann G, Margulies DS, Horstmann A, Pleger B, Lepsien J, et al.\n"
1410 "      (2010) Eigenvector Centrality Mapping for Analyzing\n"
1411 "      Connectivity Patterns in fMRI Data of the Human Brain. PLoS\n"
1412 "      ONE 5(4): e10232. doi: 10.1371/journal.pone.0010232\n\n"
1413 "  Wink, A. M., de Munck, J. C., van der Werf, Y. D., van den Heuvel,\n"
1414 "      O. A., & Barkhof, F. (2012). Fast Eigenvector Centrality\n"
1415 "      Mapping of Voxel-Wise Connectivity in Functional Magnetic\n"
1416 "      Resonance Imaging: Implementation, Validation, and\n"
1417 "      Interpretation. Brain Connectivity, 2(5), 265-274.\n"
1418 "      doi:10.1089/brain.2012.0087\n\n"
1419 "\n"
1420 "Options:\n"
1421 "  -full       = uses the full power method (Lohmann et. al. 2010).\n"
1422 "                Enables the use of thresholding and calculating\n"
1423 "                thresholded centrality. Uses sparse array to reduce \n"
1424 "                memory requirement. Automatically selected if \n"
1425 "                -thresh, or -sparsity are used.\n"
1426 "  -fecm       = uses a shortcut that substantially speeds up \n"
1427 "                computation, but is less flexible in what can be\n"
1428 "                done to the similarity matrix, i.e. it does not allow \n"
1429 "                thresholding correlation coefficients. based on \n"
1430 "                fast eigenvector centrality mapping (Wink et. al\n"
1431 "                2012). Default when -thresh, or -sparsity\n"
1432 "                are NOT used.\n"
1433 "  -thresh r   = exclude connections with correlation < r. cannot be\n"
1434 "                used with FECM\n"
1435 "  -sparsity p = only include the top p%% (0 < p <= 100) connections in the calculation.\n"
1436 "                cannot be used with the FECM method. (default)\n"
1437 "  -do_binary  = perform the ECM calculation on a binarized version of the\n"
1438 "                connectivity matrix; this requires a connectivity or \n"
1439 "                sparsity threshold.\n"
1440 "  -shift s    = value that should be added to correlation coeffs to\n"
1441 "                enforce non-negativity, s >= 0. [default = 0.0, unless\n"
1442 "                -fecm is specified in which case the default is 1.0\n"
1443 "                (e.g. Wink et al 2012)].\n"
1444 "  -scale x    = value that correlation coeffs should be multiplied by\n"
1445 "                after shifting, x >= 0 [default = 1.0, unless -fecm is\n"
1446 "                specified in which case the default is 0.5 (e.g. Wink et\n"
1447 "                al 2012)].\n"
1448 "  -eps p      = sets the stopping criterion for the power iteration\n"
1449 "                l2|v_old - v_new| < eps*|v_old|. default = 0.00001 (0.001%%)\n"
1450 "  -max_iter i = sets the maximum number of iterations to use\n"
1451 "                in the power iteration. default = 10000\n"
1452 "\n"
1453 "  -polort m   = Remove polynomial trend of order 'm', for m=0..3.\n"
1454 "                [default is m=1; removal is by least squares].\n"
1455 "                Using m=0 means that just the mean is removed.\n"
1456 "\n"
1457 "  -autoclip   = Clip off low-intensity regions in the dataset,\n"
1458 "  -automask   = so that the correlation is only computed between\n"
1459 "                high-intensity (presumably brain) voxels.  The\n"
1460 "                mask is determined the same way that 3dAutomask works.\n"
1461 "\n"
1462 "  -mask mmm   = Mask to define 'in-brain' voxels. Reducing the number\n"
1463 "                the number of voxels included in the calculation will\n"
1464 "                significantly speedup the calculation. Consider using\n"
1465 "                a mask to constrain the calculations to the grey matter\n"
1466 "                rather than the whole brain. This is also preferrable\n"
1467 "                to using -autoclip or -automask.\n"
1468 "\n"
1469 "  -prefix p   = Save output into dataset with prefix 'p'\n"
1470 "                [default prefix is 'ecm'].\n"
1471 "\n"
1472 "  -memory G   = Calculating eignevector centrality can consume alot\n"
1473 "                of memory. If unchecked this can crash a computer\n"
1474 "                or cause it to hang. If the memory hits this limit\n"
1475 "                the tool will error out, rather than affecting the\n"
1476 "                system [default is 2G].\n"
1477 "\n"
1478 "Notes:\n"
1479 " * The output dataset is a bucket type of floats.\n"
1480 " * The program prints out an estimate of its memory used\n"
1481 "    when it ends.  It also prints out a progress 'meter'\n"
1482 "    to keep you pacified.\n"
1483 "\n"
1484 "-- RWCox - 31 Jan 2002 and 16 Jul 2010\n"
1485 "-- Cameron Craddock - 13 Nov 2015 \n"
1486 "-- Daniel Clark - 14 March 2016\n"
1487             ) ;
1488       PRINT_AFNI_OMP_USAGE("3dECM",NULL) ;
1489       PRINT_COMPILE_DATE ; exit(0) ;
1490    }
1491 
1492    mainENTRY("3dECM main"); machdep(); PRINT_VERSION("3dECM cc mods");
1493    AFNI_logger("3dECM",argc,argv);
1494 
1495    /*-- option processing --*/
1496 
1497    while( nopt < argc && argv[nopt][0] == '-' ){
1498 
1499       if( strcmp(argv[nopt],"-time") == 0 ){
1500          abuc = 0 ; nopt++ ; continue ;
1501       }
1502 
1503       if( strcmp(argv[nopt],"-autoclip") == 0 ||
1504           strcmp(argv[nopt],"-automask") == 0   ){
1505 
1506          do_autoclip = 1 ; nopt++ ; continue ;
1507       }
1508 
1509       if( strcmp(argv[nopt],"-mask") == 0 ){
1510          /* mset is opened here, but not loaded? */
1511          mset = THD_open_dataset(argv[++nopt]);
1512          CHECK_OPEN_ERROR(mset,argv[nopt]);
1513          nopt++ ; continue ;
1514       }
1515 
1516       if( strcmp(argv[nopt],"-full") == 0 ){
1517          do_full = 1;
1518          nopt++ ; continue ;
1519       }
1520 
1521       if( strcmp(argv[nopt],"-fecm") == 0 ){
1522          do_fecm = 1;
1523          nopt++ ; continue ;
1524       }
1525 
1526       if( strcmp(argv[nopt],"-max_iter") == 0 ){
1527          int val = (int)strtod(argv[++nopt],&cpt) ;
1528          if( *cpt != '\0' || val < 1 ){
1529             ERROR_EXIT_CC("Illegal value after -max_iter!") ;
1530          }
1531          max_iter = val ; nopt++ ; continue ;
1532       }
1533 
1534       if( strcmp(argv[nopt],"-eps") == 0 ){
1535          double val = (double)strtod(argv[++nopt],&cpt) ;
1536          if( *cpt != '\0' || val < 0 || val > 1 ){
1537             ERROR_EXIT_CC("Illegal value after -eps!") ;
1538          }
1539          eps = val ; nopt++ ; continue ;
1540       }
1541 
1542       if( strcmp(argv[nopt],"-sparsity") == 0 ){
1543          double val = (double)strtod(argv[++nopt],&cpt) ;
1544          if( *cpt != '\0' || val <= 0 || val > 100 ){
1545             ERROR_EXIT_CC("Illegal value after -sparsity!") ;
1546          }
1547          sparsity = val ; do_sparsity = 1; nopt++ ; continue ;
1548       }
1549 
1550       if( strcmp(argv[nopt],"-thresh") == 0 ){
1551          double val = (double)strtod(argv[++nopt],&cpt) ;
1552          if( *cpt != '\0' || val < -1.1 || val > 1 ){
1553             ERROR_EXIT_CC("Illegal value after -thresh!") ;
1554          }
1555          thresh = val ; do_thresh = 1; nopt++ ; continue ;
1556       }
1557 
1558       if( strcmp(argv[nopt],"-memory") == 0 ){
1559          double val = (double)strtod(argv[++nopt],&cpt) ;
1560          if( *cpt != '\0' || val < 0 ){
1561             ERROR_EXIT_CC("Illegal value after -memory!") ;
1562          }
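         /* convert the requested budget from gigabytes to bytes, rounding
            up to a whole number of megabytes first (e.g. -memory 1.5
            becomes 1536 MB = 1,610,612,736 bytes) */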
1563          mem_bytes = (long)(ceil(val * (double)1024)*1024*1024);
1564          nopt++ ; continue ;
1565       }
1566 
1567       if( strcmp(argv[nopt],"-scale") == 0 ){
1568          double val = (double)strtod(argv[++nopt],&cpt) ;
1569          if( *cpt != '\0' || val < 0 ){
1570             ERROR_EXIT_CC("Illegal value after -scale!") ;
1571          }
1572          scale = val ; do_scale = 1; nopt++ ; continue ;
1573       }
1574 
1575       if( strcmp(argv[nopt],"-shift") == 0 ){
1576          double val = (double)strtod(argv[++nopt],&cpt) ;
1577          if( *cpt != '\0' || val < 0 ){
1578             ERROR_EXIT_CC("Illegal value after -shift!") ;
1579          }
1580          shift = val ; do_shift = 1; nopt++ ; continue ;
1581       }
1582 
1583       if( strcmp(argv[nopt],"-do_binary") == 0 ){
1584          do_binary = 1; nopt++ ; continue ;
1585       }
1586 
1587       if( strcmp(argv[nopt],"-prefix") == 0 ){
1588          prefix = strdup(argv[++nopt]) ;
1589          if( !THD_filename_ok(prefix) ){
1590             ERROR_EXIT_CC("Illegal value after -prefix!") ;
1591          }
1592          nopt++ ; continue ;
1593       }
1594 
1595       if( strcmp(argv[nopt],"-polort") == 0 ){
1596          int val = (int)strtod(argv[++nopt],&cpt) ;
1597          if( *cpt != '\0' || val < 0 || val > 3 ){
1598             ERROR_EXIT_CC("Illegal value after -polort!") ;
1599          }
1600          polort = val ; nopt++ ; continue ;
1601       }
1602 
1603       if( strcmp(argv[nopt],"-mem_stat") == 0 ){
1604          MEM_STAT = 1 ; nopt++ ; continue ;
1605       }
1606       if( strncmp(argv[nopt],"-mem_profile",8) == 0 ){
1607          MEM_PROF = 1 ; nopt++ ; continue ;
1608       }
1609 
1610       ERROR_message("Unknown option %s\n", argv[nopt]);
1611       suggest_best_prog_option(argv[0], argv[nopt]);
1612       exit(1);
1613    }
1614 
1615    /*-- open dataset, check for legality --*/
1616 
1617     if( nopt >= argc ) ERROR_EXIT_CC("Need a dataset on command line!?") ;
1618     xset = THD_open_dataset(argv[nopt]); CHECK_OPEN_ERROR(xset,argv[nopt]);
1619 
    /* Check that the fast method isn't enabled with incompatible options */
1621     if (( do_fecm == 1 ) && ((do_sparsity == 1) || (do_thresh == 1) || (do_full == 1)))
1622     {
        WARNING_message( "Cannot use FECM with -sparsity, -thresh,"
            " or -full; changing to full power iteration\n");
1625         do_fecm = 0;
1626     }
1627 
1628     if(( do_full == 1 ) && ((do_sparsity == 0) && (do_thresh == 0)))
1629     {
        WARNING_message( "Running the full ECM algorithm on a full connectome (e.g. without"
            " a correlation threshold or sparsity target) can take a long time; changing to "
            "the fast ECM method, which will do the same thing much more quickly.\n");
1633         do_fecm = 1;
1634         do_full = 0;
1635     }
1636 
1637     if((do_binary == 1) && (((do_sparsity == 0) && (do_thresh == 0)) || (do_fecm == 1)))
1638     {
        ERROR_EXIT_CC( "Inconsistent parameters. do_binary requires a sparsity or correlation "
            "threshold and cannot be calculated using FECM\n");
1641     }
1642 
   /* if fecm is specified, default to scale = 0.5, shift = 1.0 to be
      consistent with Wink et al. */
1645    if ( do_fecm == 1 )
1646    {
1647        scale = ( do_scale == 0 ) ? 0.5 : scale;
1648        shift = ( do_shift == 0 ) ? 1.0 : shift;
1649    }
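   /* note: with these defaults each correlation r is mapped to
      scale*(r + shift) = 0.5*(r + 1), i.e. from [-1,1] into [0,1],
      which enforces the non-negativity needed by the power iteration */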
1650 
1651    /* -- Load in the input dataset and make sure it is suitable -- */
1652    nvox = DSET_NVOX(xset) ; nvals = DSET_NVALS(xset) ;
1653 
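   /* sanity check: the time series data alone (nvox voxels x nvals time
      points stored as 4-byte floats) must fit within the -memory budget
      before we even attempt to load it */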
   if( (size_t)nvox * nvals * sizeof(float) > (size_t)mem_bytes )
   {
     ERROR_EXIT_CC("Size of input dataset %s ( %s Bytes ) exceeds the"
         " memory budget ( %s Bytes ). Either increase the memory budget\n"
         "or use a smaller dataset.", argv[nopt],
         commaized_integer_string((size_t)nvox*nvals*sizeof(float)),
         commaized_integer_string(mem_bytes)) ;
1661    }
1662 
1663    if( nvals < 3 )
1664      ERROR_EXIT_CC("Input dataset %s does not have 3 or more sub-bricks!",
1665         argv[nopt]) ;
1666 
1667    DSET_load(xset) ; CHECK_LOAD_ERROR(xset) ;
1668 
1669    INC_MEM_STATS((nvox * nvals * sizeof(double)), "input dset");
1670    PRINT_MEM_STATS("inset");
1671 
1672    /* if a mask was specified make sure it is appropriate */
1673    if( mset ){
1674 
1675       if( DSET_NVOX(mset) != nvox )
1676          ERROR_EXIT_CC("Input and mask dataset differ in number of voxels!") ;
1677       mask  = THD_makemask(mset, 0, 1.0, 0.0) ;
1678 
1679       /* update running memory statistics to reflect loading the image */
1680       INC_MEM_STATS( mset->dblk->total_bytes, "mask dset" );
1681       PRINT_MEM_STATS( "mset load" );
1682 
1683       /* update statistics to reflect creating mask array */
1684       nmask = THD_countmask( nvox , mask ) ;
1685       INC_MEM_STATS( nmask * sizeof(byte), "mask array" );
1686       PRINT_MEM_STATS( "mask" );
1687 
1688       INFO_message("%d voxels in -mask dataset",nmask) ;
1689       if( nmask < 2 ) ERROR_EXIT_CC("Only %d voxels in -mask, exiting...",nmask);
1690 
1691       /* update running memory statistics to reflect loading the image */
1692       DEC_MEM_STATS( mset->dblk->total_bytes, "mask dset" );
1693       PRINT_MEM_STATS( "mset unload" );
1694 
      /* free all memory associated with the mask dataset */
1696       DSET_unload(mset) ;
1697       DSET_delete(mset) ;
1698       mset = NULL ;
1699    }
1700    /* if automasking is requested, handle that now */
1701    else if( do_autoclip ){
1702       mask  = THD_automask( xset ) ;
1703       nmask = THD_countmask( nvox , mask ) ;
1704 
1705       INC_MEM_STATS( nmask * sizeof(byte), "mask array" );
1706       PRINT_MEM_STATS( "mask" );
1707 
1708       INFO_message("%d voxels survive -autoclip",nmask) ;
1709       if( nmask < 2 ) ERROR_EXIT_CC("Only %d voxels in -automask!",nmask);
1710    }
1711    /* otherwise we use all of the voxels in the image */
1712    else {
1713         nmask = nvox ;
1714 
1715         mask = (byte *) malloc( sizeof(byte) * nmask ) ;
1716         if( mask == NULL ) ERROR_EXIT_CC("Can't allocate mask?!") ;
1717 
1718         INC_MEM_STATS( nmask * sizeof(byte), "mask array" );
1719         PRINT_MEM_STATS( "mask" );
1720 
1721         /* set it all to ones */
1722         memset(mask, 1, nmask*sizeof(byte));
1723 
1724         INFO_message("computing for all %d voxels\n",nmask) ;
1725    }
1726 
1727     /*-- create vectim from input dataset --*/
1728     INFO_message("vectim-izing input dataset") ;
1729 
1730     /*-- CC added in mask to reduce the size of xvectim -- */
1731     nmask = THD_countmask( nvox , mask ) ;
1732 
1733     xvectim = THD_dset_to_vectim( xset , mask , 0 ) ;
1734     if( xvectim == NULL ) ERROR_EXIT_CC("Can't create vectim?!") ;
1735 
1736     /*-- CC update our memory stats to reflect vectim -- */
1737     INC_MEM_STATS((xvectim->nvec*sizeof(int)) +
1738                     ((xvectim->nvec)*(xvectim->nvals))*sizeof(float) +
1739                     sizeof(MRI_vectim), "vectim");
1740     PRINT_MEM_STATS( "vectim" );
1741 
1742     /*-- CC now that the data is in an easy to use form, go through
1743          and remove all of the zero variance voxels */
1744     for (ii=0; ii < xvectim->nvec; ii++)
1745     {
1746         float* test_vec = VECTIM_PTR(xvectim, ii);
1747         double sum = 0;
1748         double sum2 = 0;
1749         double var = 0;
1750         int    t_ndx;
1751 
1752         for (t_ndx = 0; t_ndx < nvals; t_ndx++)
1753         {
1754             sum += test_vec[t_ndx];
1755             sum2 += test_vec[t_ndx] * test_vec[t_ndx];
1756         }
1757 
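        /* unbiased sample variance from the running sums:
           var = ( sum(x^2) - (sum(x))^2 / n ) / (n - 1) */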
1758         var = (sum2-sum*sum/(double)nvals)/(double)(nvals-1);
1759 
1760         if (var < eps)
1761         {
1762             mask[xvectim->ivec[ii]] = 0;
1763             rem_count++;
1764         }
1765     }
1766 
1767     if(rem_count > 0)
1768     {
1769         /* re-generate xvectim with zero variance voxels removed */
1770         /* update running memory statistics to reflect freeing the vectim */
1771         DEC_MEM_STATS(((xvectim->nvec*sizeof(int)) +
1772                         ((xvectim->nvec)*(xvectim->nvals))*sizeof(float) +
1773                         sizeof(MRI_vectim)), "vectim");
1774 
1775         /* toss some trash */
1776         VECTIM_destroy(xvectim) ;
1777 
1778         /* now re-create an xvectim with the fewer number of voxels */
1779         /*-- CC added in mask to reduce the size of xvectim -- */
1780         xvectim = THD_dset_to_vectim( xset , mask , 0 ) ;
1781         if( xvectim == NULL ) ERROR_EXIT_CC("Can't create vectim (var filter)?!") ;
1782 
1783         /*-- CC update our memory stats to reflect vectim -- */
1784         INC_MEM_STATS((xvectim->nvec*sizeof(int)) +
1785                 ((xvectim->nvec)*(xvectim->nvals))*sizeof(float) +
1786                 sizeof(MRI_vectim), "vectim");
1787         PRINT_MEM_STATS( "vectim" );
1788 
1789     }
1790 
1791     if (mask != NULL)
1792     {
1793         /* --- CC free the mask */
1794         DEC_MEM_STATS( nmask*sizeof(byte), "mask array" );
1795         free(mask); mask=NULL;
1796         PRINT_MEM_STATS( "mask unload" );
1797     }
1798 
1799 
1800    /*--- CC the vectim contains a mapping between voxel index and mask index,
1801         tap into that here to avoid duplicating memory usage, this exists
1802         regardless of whether there was a mask ---*/
1803     mask_ndx_to_vol_ndx = xvectim->ivec;
1804     nmask = xvectim->nvec;
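    /* ivec[i] holds the original volume index of the i-th in-mask voxel;
       it is used below to scatter the centrality values back into the
       full-size output volume */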
1805 
1806     INFO_message("! %d voxels remain after removing voxels with zero variance\n",nmask) ;
1807 
    /* -- CC unload the input dataset to reduce memory usage -- */
1809     DEC_MEM_STATS((DSET_NVOX(xset) * DSET_NVALS(xset) * sizeof(double)),
1810         "input dset");
1811     DSET_unload(xset) ;
1812     PRINT_MEM_STATS("inset unload");
1813 
1814     /* -- CC perform detrending -- */
1815     if( polort >= 0 )
1816     {
1817         INFO_message( "Detrending with polort = %d\n", polort );
1818         for( ii=0 ; ii < xvectim->nvec ; ii++ )
1819         {
1820             /* remove polynomial trend */
1821             DETREND_polort(polort,nvals,VECTIM_PTR(xvectim,ii)) ;
1822         }
1823     }
1824 
1825     /* -- CC normalize input data to zero mean and unit variance
1826           this procedure does not change time series that
1827           have zero variance -- */
1828     THD_vectim_normalize(xvectim) ;  /* L2 norm = 1 */
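    /* with the mean removed by the detrending step above and each time
       series scaled to unit L2 norm, the Pearson correlation between two
       voxels reduces to a dot product of their time series */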
1829 
1830     /* if using sparsity calculate an initial guess for the
1831        correlation threshold that will give that sparsity,
1832        from the p-value */
1833     /* THD_pval_to_stat
1834      */
1835 
1836     /* update the user so that they know what we are up to */
1837 
1838     /* calculate the eigenvector */
1839     if ((do_full == 1) || (do_sparsity == 1) || (do_thresh == 1))
1840     {
1841         /* -- CC tell the user what we are up to */
1842         INFO_message( "Calculating ECM with full method (sparsity=%3.3f%%,\n"
1843             " thresh=%3.3f, scale=%3.3f, shift=%3.3f, max_iter=%d, eps=%3.3f,\n"
1844             " binary=%d, mem=%ld)\n", sparsity, thresh, scale, shift, max_iter,
1845             eps, do_binary, mem_bytes - running_mem);
1846 
1847         eigen_vec=calc_full_power_sparse(xvectim, thresh, sparsity, shift,
1848             scale, eps, max_iter, do_binary, (mem_bytes - running_mem));
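        /* the remaining budget (total minus memory already tracked as in
           use) is passed down, presumably so the sparse similarity matrix
           built inside calc_full_power_sparse stays within the -memory limit */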
1849 
1850     }
1851     else
1852     {
1853         INFO_message( "Calculating ECM with FECM (sparsity=%3.3f%%,thresh=%3.3f,\n"
1854             "  scale=%3.3f, shift=%3.3f, max_iter=%d, eps=%3.3f, binary=%d)\n",
1855             sparsity, thresh, scale, shift, max_iter, eps, do_binary);
1856         eigen_vec=calc_fecm_power(xvectim, shift, scale, eps, max_iter);
1857     }
1858 
1859     if( eigen_vec == NULL )
1860     {
1861         ERROR_EXIT_CC( "Eigen vector calculation failed!\n" );
1862     }
1863 
1864     /*-- create output dataset --*/
1865     cset = EDIT_empty_copy( xset ) ;
1866 
1867     /*-- configure the output dataset */
1868     if( abuc )
1869     {
1870         EDIT_dset_items( cset ,
1871             ADN_prefix    , prefix         ,
1872             ADN_nvals     , nsubbriks      , /*  subbricks */
1873             ADN_ntt       , 0              , /* no time axis */
1874             ADN_type      , HEAD_ANAT_TYPE ,
1875             ADN_func_type , ANAT_BUCK_TYPE ,
1876             ADN_datum_all , MRI_float     ,
1877             ADN_none ) ;
1878     }
1879     else
1880     {
1881         EDIT_dset_items( cset ,
1882             ADN_prefix    , prefix         ,
1883             ADN_nvals     , nsubbriks      , /*  subbricks */
1884             ADN_ntt       , nsubbriks      ,  /* num times */
1885             ADN_ttdel     , 1.0            ,  /* fake TR */
1886             ADN_nsl       , 0              ,  /* no slice offsets */
1887             ADN_type      , HEAD_ANAT_TYPE ,
1888             ADN_func_type , ANAT_EPI_TYPE  ,
1889             ADN_datum_all , MRI_float      ,
1890             ADN_none ) ;
1891     }
1892 
    /* add history information to the header */
1894     tross_Make_History( "3dECM" , argc,argv , cset ) ;
1895 
1896     ININFO_message("creating output dataset in memory") ;
1897 
1898     /* -- Configure the subbriks -- */
1899     subbrik = 0;
1900 
1901     EDIT_BRICK_TO_NOSTAT(cset,subbrik) ;                     /* stat params  */
1902     /* CC this sets the subbrik scaling factor, which we will probably want
1903         to do again after we calculate the voxel values */
1904     EDIT_BRICK_FACTOR(cset,subbrik,1.0) ;                 /* scale factor */
1905 
1906     if (do_binary == 1)
1907     {
1908         EDIT_BRICK_LABEL(cset,subbrik,"binary ECM") ;
1909     }
1910     else
1911     {
1912         EDIT_BRICK_LABEL(cset,subbrik,"weighted ECM") ;
1913     }
1914 
1915     EDIT_substitute_brick(cset,subbrik,MRI_float,NULL) ;   /* make array   */
1916 
1917     /* copy measure data into the subbrik */
1918     wodset = DSET_ARRAY(cset,subbrik);
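    /* passing NULL to EDIT_substitute_brick above tells AFNI to allocate
       the float array for this sub-brick itself; DSET_ARRAY then returns
       a pointer to that newly allocated array */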
1919 
1920     /* increment memory stats */
1921     INC_MEM_STATS( (DSET_NVOX(cset)*DSET_NVALS(cset)*sizeof(float)),
1922         "output dset");
1923     PRINT_MEM_STATS( "outset" );
1924 
1925     /* set all of the voxels in the output image to zero */
1926     bzero(wodset, DSET_NVOX(cset)*sizeof(float));
1927 
1928     /* output the eigenvector, scaling it by sqrt(2) */
1929     for (ii = 0; ii < xvectim->nvec; ii++ )
1930     {
1931         wodset[ mask_ndx_to_vol_ndx[ ii ] ] = (float)(SQRT_2 * eigen_vec[ ii ]);
1932     }
1933 
1934     /* we have copied out v_prev, now we can kill it */
1935     if( eigen_vec != NULL )
1936     {
1937         free(eigen_vec);
1938         eigen_vec = NULL;
1939 
        /* update running memory statistics to reflect freeing the eigenvector */
1941         DEC_MEM_STATS(xvectim->nvec*sizeof(double), "v_prev");
1942     }
1943 
    /*-- tell the user that the calculation is finished --*/
    INFO_message("Done.\n") ;
1946 
1947     /* update running memory statistics to reflect freeing the vectim */
1948     DEC_MEM_STATS(((xvectim->nvec*sizeof(int)) +
1949                        ((xvectim->nvec)*(xvectim->nvals))*sizeof(float) +
1950                        sizeof(MRI_vectim)), "vectim");
1951 
1952     /* toss some trash */
1953     VECTIM_destroy(xvectim) ;
1954     DSET_delete(xset) ;
1955 
1956     PRINT_MEM_STATS( "Vectim Unload" );
1957 
1958     /* finito */
1959     INFO_message("Writing output dataset to disk [%s bytes]",
1960                 commaized_integer_string(cset->dblk->total_bytes)) ;
1961 
1962     /* write the dataset */
1963     DSET_write(cset);
1964     WROTE_DSET(cset);
1965 
    /* decrement our memory stats; since we rely on the header for this
       information, we update the stats before actually freeing the memory */
1968     DEC_MEM_STATS( (DSET_NVOX(cset)*DSET_NVALS(cset)*sizeof(float)), "output dset");
1969 
1970     /* free up the output dataset memory */
1971     DSET_unload(cset);
1972     DSET_delete(cset);
1973 
1974     PRINT_MEM_STATS( "Unload Output Dset" );
1975 
1976     /* force a print */
1977     MEM_STAT = 1;
1978     PRINT_MEM_STATS( "Fin" );
1979 
1980     exit(0) ;
1981 }
1982