1 #include "mrilib.h"
2 
3 /**** Also see file mri_genalign_util.c ****/
4 
5 #ifdef USE_OMP
6 #include <omp.h>
7 #endif
8 
9 #undef  BIGVAL
10 #define BIGVAL 1.e+38   /* this is a big value */
11 
12 /* is a voxel 'good' to use? */
13 
14 #undef  GOOD
15 #define GOOD(i) (mask==NULL || mask[i])
16 
17 /* mark for a good setup */
18 
19 #undef  SMAGIC
20 #define SMAGIC 208921148  /* Zip+4 Code for AFNI Group at NIMH */
21 
22 /*--- global access to setup parameters [see 3ddata.h for GA_setup typedef] ---*/
23 
24 static GA_setup *gstup = NULL ;     /* this struct is critically important */
25 static GA_setup *gstup_bk = NULL ;
26 
27 float GA_get_warped_overlap_fraction(void) ; /* prototype for 'ov' */
28 
29 /* codes indicating the order of operations used to create affine warp */
30 
31 static int   aff_use_before=0 , aff_use_after=0 ;
32 static mat44 aff_before       , aff_after       , aff_gamijk , aff_gamxyz ;
33 
34 /*---------------------------------------------------------------------------*/
static int mverb = 0 ;   /* verbosity level for this file's messages */
static int mpr   = 0 ;   /* count of progress marks printed so far */

/*! Set the internal verbosity level (higher = chattier). */
void mri_genalign_verbose( int v )
{
   mverb = v ;
}
37 
38 /* for rounding the cost functional -- experimental! */
39 
static int mround = 0 ;  /* nonzero = round the cost functional (experimental) */

/*! Enable/disable rounding of the cost functional [04 Jun 2021] - don't use. */
void mri_genalign_round( int v )
{
   mround = v ;
}
42 
43 /*---------------------------------------------------------------------------*/
44 /* 27 Aug 2008: replace use of drand48 with myunif, for inter-system control */
45 /*              that is, we'll use identical 64 bit arithmetic for this      */
46 /*           see https://en.wikipedia.org/wiki/Linear_congruential_generator */
47 
#ifndef INLINE
#define INLINE inline    /* normally supplied by mrilib.h */
#endif

/* 64-bit linear congruential generator: x <- MYa*x + MYb (mod 2^64).
   Used instead of drand48 so that all systems produce identical
   'random' sequences [27 Aug 2008]. */

static const unsigned long long MYa = 62003 ;     /* multiplicand */
static const unsigned long long MYb = 15485863 ;  /* addend */
static       unsigned long long MYx = 15482917 ;  /* current state (seed) */

/*! Advance the generator and return a uniform value in [0,1):
    the low 32 bits of the new state, scaled down by 2^32. */
static INLINE float myunif(void)
{
   MYx = MYa * MYx + MYb ;
   return ( ((unsigned int)MYx) / 4294967296.0f ) ;
}

/*! Re-seed the generator == change the 'random' sequence start point. */
static void myunif_reset( unsigned long long x ){ MYx = x ; }
61 
62 /*---------------------------------------------------------------------------*/
63 
64 /* Macro to periodically reduce a variable into the range 0..1:
65    for example: PRED01(1.2) == 0.8, PRED01(1.8) == 0.2, et cetera.
66    This capability is used in powell_int to restrict parameter
67    search to a bounded domain.
68    Graphically
69                PRED01(x)|
70                         | /\      /\      /\      /\      /\
71                         |/  \    /  \    /  \    /  \    /
72                         |    \  /    \  /    \  /    \  /
73                         |     \/      \/      \/      \/
74                         +------------------------------------> x
75                           -3  -2  -1   0  +1  +2  +3  +4  +5
76 */
77 
78 #undef  MYfabsf
79 #define MYfabsf(x) fabsf((float)(x))  /* to avoid compiler warnings */
80 
81 #undef  PRED01
82 #define PRED01(x) MYfabsf( (x) - 2.0f*floorf(0.5f*((x)+1.0f)) )
83 
84 /*--- Max number of points to warp at a time ---*/
85 
#undef  NPER
#define NPER 262144  /* 262144 floats = 1 Mbyte per work array */

static int nperval = NPER ;  /* current points-per-batch setting */

/*! Set how many points get warped per batch.  Any request of 666 or
    fewer selects the 'huge' value 16777216 (2^24) instead. */
void GA_set_nperval( int i )
{
   nperval = (i > 666) ? i : 16777216 ;
}
91 
92 /*--------------------------------------------------------------------*/
93 /*! Interpolate target image to control points in base image space.
94     - Results go into avm, which must be pre-allocated.
95     - If mpar==NULL, then warp parameters are all taken from gstup and
96       the results are calculated at ALL points in the base image.
97 *//*------------------------------------------------------------------*/
98 
void GA_get_warped_values( int nmpar , double *mpar , float *avm )
{
   int    npar, ii,jj,kk,qq,pp,npp,mm,nx,ny,nxy, clip=0, npt, nall, nper ;
   float *wpar, v ;
   float *imf=NULL, *jmf=NULL, *kmf=NULL ;  /* input (unwarped) index triples */
   float *imw, *jmw, *kmw ;                 /* warped index triples */
   MRI_IMAGE *aim ;                         /* target image to interpolate */

ENTRY("GA_get_warped_values") ;

   /* NOTE: nmpar is not referenced below; parameter counts come from gstup.
      avm must be pre-allocated by the caller with at least npt entries
      (npt = bsim->nvox when mpar==NULL or gstup->im==NULL, else npt_match). */

   npar = gstup->wfunc_numpar ;
   wpar = (float *)calloc(sizeof(float),npar) ; /* full (fixed+variable) params */
   nper = MAX(nperval,NPER) ;                   /* batch size for warping */

   /* load ALL the warping parameters, including the fixed values */

   if( mpar != NULL ){                              /* load from input array */
/* STATUS("copy from mpar") ; */
     for( ii=pp=0 ; ii < npar ; ii++ ){
       if( gstup->wfunc_param[ii].fixed ){                    /* fixed param */
         wpar[ii] = gstup->wfunc_param[ii].val_fixed ;
       } else {                                            /* variable param */
         v = (float)mpar[pp++] ;
         wpar[ii] = gstup->wfunc_param[ii].min        /* scale to true range */
                   +gstup->wfunc_param[ii].siz * PRED01(v) ;
       }
     }
   } else {                                     /* load directly from struct */
/* STATUS("copy from gstup") ; */
     for( ii=0 ; ii < gstup->wfunc_numpar ; ii++ )
       wpar[ii] = gstup->wfunc_param[ii].val_out ;
   }

   /* create space for unwarped indexes, if none given;
      ONLY in this branch are imf/jmf/kmf owned (and freed) by this function */

   if( mpar == NULL || gstup->im == NULL ){
/* STATUS("make space") ; */
     npt = gstup->bsim->nvox ; nall = MIN(nper,npt) ;
     imf = (float *)calloc(sizeof(float),nall) ;
     jmf = (float *)calloc(sizeof(float),nall) ;
     kmf = (float *)calloc(sizeof(float),nall) ;
   } else {   /* otherwise imf/jmf/kmf will alias into gstup->im/jm/km below */
     npt = gstup->npt_match ; nall = MIN(nper,npt) ;
   }

   /* create space for indexes of warped control points */

/* STATUS("make more space") ; */
   imw = (float *)calloc(sizeof(float),nall) ;
   jmw = (float *)calloc(sizeof(float),nall) ;
   kmw = (float *)calloc(sizeof(float),nall) ;

   nx = gstup->bsim->nx; ny = gstup->bsim->ny; nxy = nx*ny;

   /* send parameters to warping function for its setup */

/* STATUS("send params to wfunc") ; */
   RAND_ROUND ;
   gstup->wfunc( npar , wpar , 0,NULL,NULL,NULL , NULL,NULL,NULL ) ;

   /* choose image from which to extract data */

   aim = (gstup->ajims != NULL && mpar != NULL ) ? gstup->ajims /* smoothed */
                                                 : gstup->ajim; /* unsmooth */

   /*--- do (up to) nall points at a time ---*/

   for( pp=0 ; pp < npt ; pp+=nall ){

     npp = MIN( nall , npt-pp ) ;  /* number to do in this iteration */

     if( mpar == NULL || gstup->im == NULL ){  /* do all points */
/* STATUS("do all points") ; */
       for( qq=0 ; qq < npp ; qq++ ){   /* unpack 1D voxel index mm */
         mm = pp+qq ;                   /* into 3D indexes (ii,jj,kk) */
         ii = mm % nx; kk = mm / nxy; jj = (mm-kk*nxy) / nx;
         imf[qq] = (float)ii; jmf[qq] = (float)jj; kmf[qq] = (float)kk;
       }
     } else {
/* STATUS("do control points") ; */
       imf = gstup->im->ar + pp ;  /* pointers to control points */
       jmf = gstup->jm->ar + pp ;
       kmf = gstup->km->ar + pp ;
     }

     /****-- warp control points to new locations ---****/
     /**** (warp does index-to-index transformation) ****/

/* STATUS("call wfunc for real") ; */
     RAND_ROUND ;
     gstup->wfunc( npar , NULL ,
                   npp  , imf,jmf,kmf , imw,jmw,kmw ) ;

     /* interpolate target image at warped points */

/* STATUS("interpolate") ; */

     switch( gstup->interp_code ){
       case MRI_NN:
         GA_interp_NN( aim , npp , imw,jmw,kmw , avm+pp ) ;
       break ;

       case MRI_LINEAR:
         GA_interp_linear( aim , npp , imw,jmw,kmw , avm+pp ) ;
       break ;

       case MRI_CUBIC:
         clip = 1 ;   /* higher-order methods can overshoot: clip at the end */
         GA_interp_cubic( aim , npp , imw,jmw,kmw , avm+pp ) ;
       break ;

       case MRI_VARP1:  /* don't use this! */
         clip = 1 ;
         GA_interp_varp1( aim , npp , imw,jmw,kmw , avm+pp ) ;
       break ;

       case MRI_WSINC5:
         clip = 1 ;
         GA_interp_wsinc5( aim , npp , imw,jmw,kmw , avm+pp ) ;
       break ;

       default:        /* for higher order methods not implemented here */
       case MRI_QUINTIC:
         clip = 1 ;
         GA_interp_quintic( aim , npp , imw,jmw,kmw , avm+pp ) ;
       break ;
     }

   } /* end of loop over matching points */

   /* free the enslaved memory */

/* STATUS("free stuff") ; */
   free((void *)kmw); free((void *)jmw); free((void *)imw);
   if( mpar == NULL || gstup->im == NULL ){  /* only if allocated above */
     free((void *)kmf); free((void *)jmf); free((void *)imf);
   }
   free((void *)wpar) ;

   /* clip interpolated values to range of target image, if need be */

   if( clip ){
/* STATUS("clip") ; */
     float bb=gstup->ajbot , tt=gstup->ajtop ;
     for( pp=0 ; pp < npt ; pp++ )
            if( avm[pp] < bb ) avm[pp] = bb ;
       else if( avm[pp] > tt ) avm[pp] = tt ;
   }

   EXRETURN ;
}
250 
#if 0  /* debugging-tracking stuff -- NOTE: this entire section is compiled out */
/*---------------------------------------------------------------------------*/
/* Stuff for calling a user-supplied function every time the cost
   function is smaller than the previous minimal value (vbest).
   GA_fitter_dotter() is a simple example.
*//*-------------------------------------------------------------------------*/

static float fit_vbest = BIGVAL ;                  /* best cost seen so far */
static void (*fit_callback)(int,double *) = NULL ; /* user progress callback */

void GA_reset_fit_callback( void (*fc)(int,double*) )  /* user func is fc */
{
   fit_vbest = BIGVAL ; fit_callback = fc ; return ;
}

/* print one dot per improvement */
void GA_fitter_dotter(int n, double *mpar){ printf("."); fflush(stdout); }

void GA_do_dots(int x){ GA_reset_fit_callback( (x)?GA_fitter_dotter:NULL ); }

/*---------------------------------------------------------------------------*/
/* print the best cost, carriage-return style */
void GA_fitter_coster(int n, double *mpar){
  printf(" + Cost=%g\r",fit_vbest); fflush(stdout);
}

/* print the best cost, tab-separated style */
void GA_fitter_coster_tab(int n, double *mpar){
  printf(" + Cost=%g\t",fit_vbest); fflush(stdout);
}

void GA_do_cost(int x, byte use_tab){
   if (use_tab) {
      GA_reset_fit_callback( (x)?GA_fitter_coster_tab:NULL );
   } else {
      GA_reset_fit_callback( (x)?GA_fitter_coster:NULL );
   }
}

/* print best cost plus the current variable parameters (scaled to range) */
void GA_fitter_params( int n , double *mpar )
{
  int ii , pp ; double wpar , v ; int npar = gstup->wfunc_numpar ;
  printf(" + Cost=%g Param=",fit_vbest) ;
  for( ii=pp=0 ; ii < npar ; ii++ ){
    if( !gstup->wfunc_param[ii].fixed ){  /* variable param */
      v = mpar[pp++] ;
      wpar = gstup->wfunc_param[ii].min        /* scale to true range */
            +gstup->wfunc_param[ii].siz * PRED01(v) ;
      printf("%g ",wpar) ;
    }
  }
  printf("\n") ; fflush(stdout) ;
}

void GA_do_params( int x ){
   GA_reset_fit_callback( (x)?GA_fitter_params:NULL );
}
#endif  /* debugging-tracking stuff */
306 /*---------------------------------------------------------------------------*/
307 
308 /*---------------------------------------------------------------------------*/
309 /* This function sets up parameters in thd_correlate.c,
310    for how to compute the 2D histogram betwixt base and target.
311 *//*-------------------------------------------------------------------------*/
312 
GA_setup_2Dhistogram(float * xar,float * yar)313 void GA_setup_2Dhistogram( float *xar , float *yar )  /* 08 May 2007 */
314 {
315 ENTRY("GA_setup_2Dhistogram") ;
316 
317    RAND_ROUND ;
318    switch( gstup->hist_mode ){
319 
320      default:
321      case GA_HIST_EQWIDE:
322        set_2Dhist_xybin( 0,NULL,NULL ) ;
323      break ;
324 
325      case GA_HIST_CLEQWD:{
326        int nbin=(int)gstup->hist_param , npt=gstup->npt_match ;
327        float xbc,xtc , ybc,ytc ;
328 
329        if( nbin < 3 ) nbin = 0 ;
330        set_2Dhist_hbin( nbin ) ;
331        set_2Dhist_xyclip( npt , xar , yar ) ;
332 
333        if( mverb > 1 ){
334          (void)get_2Dhist_xyclip( &xbc,&xtc , &ybc,&ytc ) ;
335          ININFO_message(" - histogram: source clip %g .. %g; base clip %g .. %g",
336                         xbc,xtc , ybc,ytc ) ;
337          ININFO_message(" - versus source range %g .. %g; base range %g .. %g",
338                         gstup->ajbot, gstup->ajclip, gstup->bsbot, gstup->bsclip ) ;
339        }
340      }
341      break ;
342 
343      case GA_HIST_EQHIGH:{
344        int nbin=(int)gstup->hist_param , npt=gstup->npt_match , ii,dm,mm,nnew ;
345        float *xx , *yy ;
346 
347        if( npt > 666*nbin ){                /* subsample data to save CPU time */
348          dm = GA_find_relprime_fixed( npt ) ;
349          mm = 1 ; nnew = (int)(314.1593*nbin) ;
350          xx = (float *)malloc(sizeof(float)*nnew) ;
351          yy = (float *)malloc(sizeof(float)*nnew) ;
352          for( ii=0 ; ii < nnew ; ii++,mm=(mm+dm)%npt ){
353            xx[ii] = xar[mm] ; yy[ii] = yar[mm] ;
354          }
355          npt = nnew ;
356        } else {                             /* just use all the data */
357          xx = xar ; yy = yar ;
358        }
359 
360        if( mverb > 1 )
361          ININFO_message("- setting up equalized histogram bins with %d pts",npt) ;
362 
363        set_2Dhist_xybin_eqhigh( nbin , npt , xx , yy ) ;
364        if( xx != xar ){ free(yy); free(xx); }
365 
366        if( mverb > 1 ){
367          nbin = get_2Dhist_xybin( &xx , &yy ) ;
368          ININFO_message("-- %d equalized histogram bins for source follow:",nbin) ;
369          fprintf(stderr,"    ") ;
370          for( ii=0 ; ii <= nbin ; ii++ ) fprintf(stderr," %g",xx[ii]) ;
371          fprintf(stderr,"\n") ;
372          ININFO_message("-- %d equalized histogram bins for base follow:",nbin) ;
373          fprintf(stderr,"    ") ;
374          for( ii=0 ; ii <= nbin ; ii++ ) fprintf(stderr," %g",yy[ii]) ;
375          fprintf(stderr,"\n") ;
376        }
377      }
378      break ;
379    }
380 
381    gstup->need_hist_setup = 0 ; EXRETURN ;
382 }
383 
384 /*---------------------------------------------------------------------------*/
385 
386 #undef  CMAX
387 #define CMAX 0.9999f  /* max correlation to allow */
388 
389 static int lpczz = 0 ;
GA_pearson_ignore_zero_voxels(int z)390 void GA_pearson_ignore_zero_voxels(int z){ lpczz = z; }  /* 23 Feb 2010 */
391 
392 /*---------------------------------------------------------------------*/
393 /*! LPC = Local Pearson Correlation, as described in the famous paper. */
394 /*  [if you never heard of that paper, please back away from the file] */
395 /*---------------------------------------------------------------------*/
396 
float GA_pearson_local( int npt , float *avm, float *bvm, float *wvm )
{
   GA_BLOK_set *gbs ;
   int nblok , nelm , *elm , dd , ii,jj , nm , use_ppow=0 ;
   float xv,yv,xy,xm,ym,vv,ww,ws,wss , pcor , wt , psum=0.0f , pabs,ppow=1.0f ;
   static int uwb=-1 , wsold=0 ;  /* flags set by the environment */

ENTRY("GA_pearson_local") ;

   /* NOTE: npt is not referenced; the point count comes from the bloks.
      wvm==NULL selects the unweighted correlation path below. */

   /*-------------------------------------------------------------------------*/
   /*-- if not already present, set up geometric structure for BLOKs we use --*/

   if( gstup->blokset == NULL ){
     float rad=gstup->blokrad , mrad ; float *ima=NULL,*jma=NULL,*kma=NULL ;
     /* increment blok 'radius' to allow for smoothing radius */
     if( gstup->smooth_code > 0 && gstup->smooth_radius_base > 0.0f )
       rad = sqrt( rad*rad +SQR(gstup->smooth_radius_base) ) ;
     /* minimum blok 'radius' */
     mrad = 1.2345f*(gstup->base_di + gstup->base_dj + gstup->base_dk) ;
     rad  = MAX(rad,mrad) ;
     if( gstup->im != NULL ) ima = gstup->im->ar ;  /* 19 Feb 2010: oopsie */
     if( gstup->jm != NULL ) jma = gstup->jm->ar ;  /* points to include */
     if( gstup->km != NULL ) kma = gstup->km->ar ;
     gstup->blokset = create_GA_BLOK_set(  /* cf. mri_genalign_util.c */
                            gstup->bsim->nx, gstup->bsim->ny, gstup->bsim->nz,
                            gstup->base_di , gstup->base_dj , gstup->base_dk ,
                            gstup->npt_match , ima,jma,kma ,
                            gstup->bloktype , rad , gstup->blokmin , 1.0f,mverb ) ;
     if( gstup->blokset == NULL )
       ERROR_exit("Can't create GA_BLOK_set?!?") ;  /* should not happen */
   }
   /*-------------------------------------------------------------------------*/

   gbs   = gstup->blokset ;
   nblok = gbs->num ;
   if( nblok < 1 ) ERROR_exit("LPC: Bad GA_BLOK_set?!") ;
   ppow  = gbs->ppow ; use_ppow = (ppow != 1.0f) ;  /* 28 Jan 2021 */

   if( uwb < 0 ){                                 /* cache env vars once */
     uwb   = AFNI_yesenv("AFNI_LPC_UNWTBLOK") ;  /* first time in */
     wsold = AFNI_yesenv("AFNI_LPC_OLDWSUM") ;   /* 02 Mar 2010 */
   }

   /*--- loop over blocks, compute local correlations ---*/

   for( wss=0.0f,dd=0 ; dd < nblok ; dd++ ){
     nelm = gbs->nelm[dd] ;     /* number of points inside this blok */
     if( nelm < 9 ) continue ;  /* too few? skip this blok */
     elm = gbs->elm[dd] ;       /* list of 1D index to points for this blok */

     RAND_ROUND ; /* 26 Feb 2020 */

     if( wvm == NULL ){   /*** unweighted correlation ***/
       xv=yv=xy=xm=ym=0.0f ; ws = 1.0f ;
       for( ii=0 ; ii < nelm ; ii++ ){   /* pass 1: accumulate sums */
         jj = elm[ii] ;                  /* index of ii-th point in blok */
         xm += avm[jj] ; ym += bvm[jj] ;
       }
       xm /= nelm ; ym /= nelm ;         /* mean of x and y inside blok */
       for( ii=0 ; ii < nelm ; ii++ ){   /* pass 2: centered moments */
         jj = elm[ii] ;
         vv = avm[jj]-xm ; ww = bvm[jj]-ym ;
         xv += vv*vv ; yv += ww*ww ; xy += vv*ww ; /* sums needed */
       }

     } else {             /*** weighted correlation ***/
       xv=yv=xy=xm=ym=ws=0.0f ;
       for( ii=0 ; ii < nelm ; ii++ ){   /* pass 1: weighted sums */
         jj = elm[ii] ;
         wt = wvm[jj] ; ws += wt ;
         xm += avm[jj]*wt ; ym += bvm[jj]*wt ;
       }
       xm /= ws ; ym /= ws ;            /* weighted averages */
       for( ii=0 ; ii < nelm ; ii++ ){  /* pass 2: weighted moments */
         jj = elm[ii] ;
         wt = wvm[jj] ; vv = avm[jj]-xm ; ww = bvm[jj]-ym ;
         xv += wt*vv*vv ; yv += wt*ww*ww ; xy += wt*vv*ww ;
       }
       if( uwb ) ws = 1.0f ;   /* env override: each blok gets unit weight */
     }
     if( wsold ) wss += ws ;                /* the olden way was to add first */

     /*** massage results to get final stretched correlation ***/

     if( xv <= 0.0f || yv <= 0.0f ) continue ;      /* skip this blok */
     pcor = xy/sqrtf(xv*yv) ;                       /* correlation */
          if( pcor >  CMAX ) pcor =  CMAX ;         /* limit the range (avoid bad juju) */
     else if( pcor < -CMAX ) pcor = -CMAX ;
     pcor = logf( (1.0f+pcor)/(1.0f-pcor) ) ;       /* 2*arctanh() = stretch large values */
     pabs = MYfabsf(pcor) ;
     if( use_ppow ) pabs = powf(pabs,ppow) ;        /* 28 Jan 2021 */
     psum += ws * pcor * pabs ;                     /* emphasize large values even more */
     if( !wsold ) wss += ws ;                       /* moved here 02 Mar 2010 */
   }

   /* NOTE(review): if every blok is skipped, wss stays 0 and this is 0/0 --
      presumably never happens with a sane GA_BLOK_set; verify upstream. */
   RETURN(0.25f*psum/wss);      /* averaged stretched emphasized correlation */
}                                /* [0.25 to compensate for the 2*arctanh()] */
494 
495 /*------------------------------------------------------------*/
496 /*! LSC = Local Spearman Correlation (not described anywhere) */
497 /** And this is very slow, so it not recommended!!!          **/
498 /*------------------------------------------------------------*/
499 
GA_spearman_local(int npt,float * avm,float * bvm,float * wvm)500 float GA_spearman_local( int npt , float *avm, float *bvm, float *wvm )
501 {
502    GA_BLOK_set *gbs ;
503    int nblok , nelm , *elm , dd , ii,jj ;
504    float wss , pcor , psum=0.0f ;
505    float *xt=NULL , *yt=NULL ; int nxt=0 ;
506 
507    if( gstup->blokset == NULL ){
508      float rad=gstup->blokrad , mrad ; float *ima=NULL,*jma=NULL,*kma=NULL ;
509      if( gstup->smooth_code > 0 && gstup->smooth_radius_base > 0.0f )
510        rad = sqrt( rad*rad +SQR(gstup->smooth_radius_base) ) ;
511      mrad = 1.2345f*(gstup->base_di + gstup->base_dj + gstup->base_dk) ;
512      rad  = MAX(rad,mrad) ;
513      if( gstup->im != NULL ) ima = gstup->im->ar ;  /* 19 Feb 2010: oopsie */
514      if( gstup->jm != NULL ) jma = gstup->jm->ar ;
515      if( gstup->km != NULL ) kma = gstup->km->ar ;
516      gstup->blokset = create_GA_BLOK_set(  /* cf. mri_genalign_util.c */
517                             gstup->bsim->nx, gstup->bsim->ny, gstup->bsim->nz,
518                             gstup->base_di , gstup->base_dj , gstup->base_dk ,
519                             gstup->npt_match , ima,jma,kma ,
520                             gstup->bloktype , rad , gstup->blokmin , 1.0f,mverb ) ;
521      if( gstup->blokset == NULL )
522        ERROR_exit("Can't create GA_BLOK_set?!?") ;
523    }
524 
525    gbs   = gstup->blokset ;
526    nblok = gbs->num ;
527    if( nblok < 1 ) ERROR_exit("LPC: Bad GA_BLOK_set?!") ;
528 
529    for( wss=0.0001f,dd=0 ; dd < nblok ; dd++ ){
530      nelm = gbs->nelm[dd] ; if( nelm < 9 ) continue ;
531      elm = gbs->elm[dd] ;
532      RAND_ROUND ;
533      if( nelm > nxt ){
534        xt = (float *)realloc(xt,sizeof(float)*nelm) ;
535        yt = (float *)realloc(yt,sizeof(float)*nelm) ; nxt = nelm ;
536      }
537      for( ii=0 ; ii < nelm ; ii++ ){
538        jj = elm[ii] ; xt[ii] = avm[jj] ; yt[ii] = bvm[jj] ;
539      }
540      pcor = THD_spearman_corr( nelm , xt , yt ) ;
541      pcor = 2.0f * sinf( 0.523599f * pcor ) ;  /* 0.523599 = PI/6 */
542      if( pcor > CMAX ) pcor = CMAX; else if( pcor < -CMAX ) pcor = -CMAX;
543      pcor = logf( (1.0f+pcor)/(1.0f-pcor) ) ;  /* 2*arctanh() */
544      psum += pcor * MYfabsf(pcor) ;              /* emphasize large values */
545      wss++ ;
546    }
547    if( xt != NULL ){ free(yt) ; free(xt) ; }
548 
549    return (0.25f*psum/wss);      /* averaged stretched emphasized correlation */
550 }                                /* [0.25 to compensate for the 2*arctanh()] */
551 
552 /*----------------------------------------------------------------------------*/
553 /* Set the lpc+ parameters, where
554    MICHO = Mutual Information : Correlation ratio : Hellinger metric : Overlap
555 *//*--------------------------------------------------------------------------*/
556 
/* weights for the lpc+/lpa+ composite cost functional [24 Feb 2010] */
static double micho_mi  = 0.1 ;  /* mutual information */
static double micho_nmi = 0.1 ;  /* normalized mutual information */
static double micho_crA = 0.5 ;  /* correlation ratio (absolute) */
static double micho_hel = 0.3 ;  /* Hellinger metric */
static double micho_ov  = 0.0 ;  /* overlap fraction */

/*! Set the five lpc+/lpa+ weights: Hellinger, MI, NMI, CR(A), overlap. */
void GA_setup_micho( double a, double b, double c, double d, double e )
{
   micho_hel = a ;
   micho_mi  = b ;
   micho_nmi = c ;
   micho_crA = d ;
   micho_ov  = e ;
}
567 
568 /*---------------------------------------------------------------------------*/
569 /*! Compute a particular fit cost function
570      - avm = target image values warped to base
571      - bvm = base image values
572      - wvm = weight image values
573     If you ever invent a new cost function for 3dAllineate, it MUST go here!
574 *//*-------------------------------------------------------------------------*/
575 
double GA_scalar_costfun( int meth , int npt ,
                          float *avm , float *bvm , float *wvm )
{
  double val=0.0f ;

ENTRY("GA_scalar_costfun") ;

   /* NOTE: npt is not referenced below; all calls use gstup->npt_match.
      wvm may be NULL (the called functions take it as an optional weight). */

   RAND_ROUND ; /* 26 Feb 2020 */

  switch( meth ){

    default:
    case GA_MATCH_PEARSON_SCALAR:   /* Pearson correlation coefficient */
      val = (double)THD_pearson_corr_wt( gstup->npt_match, avm, bvm,wvm ) ;
      val = 1.0 - fabs(val) ;       /* smaller = better match */
    break ;

    case GA_MATCH_PEARSON_SIGNED:   /* signed Pearson (no abs) */
      val = (double)THD_pearson_corr_wt( gstup->npt_match, avm, bvm,wvm ) ;
    break ;

    case GA_MATCH_PEARSON_LOCALS:  /* LPC */
      val = (double)GA_pearson_local( gstup->npt_match, avm, bvm,wvm ) ;
    break ;

    case GA_MATCH_PEARSON_LOCALA:  /* LPA */
      val = (double)GA_pearson_local( gstup->npt_match, avm, bvm,wvm ) ;
      val = 1.0 - fabs(val) ;
    break ;

    case GA_MATCH_NCDZLIB:         /* NCD -- 02 Mar 2009 */
      val = (double)THD_ncdfloat_scl( gstup->npt_match ,
                                      gstup->ajbot , gstup->ajclip , avm ,
                                      gstup->bsbot , gstup->bsclip , bvm  ) ;
    break ;

    case GA_MATCH_SPEARMAN_SCALAR:  /* rank-order (Spearman) correlation */
      val = (double)THD_spearman_corr_nd( gstup->npt_match , avm,bvm ) ;
      val = 1.0 - fabs(val) ;
    break ;

    case GA_MATCH_KULLBACK_SCALAR:  /* AKA Mutual Information */
      val = -THD_mutual_info_scl( gstup->npt_match ,
                                  gstup->ajbot , gstup->ajclip , avm ,
                                  gstup->bsbot , gstup->bsclip , bvm , wvm ) ;
    break ;

    case GA_MATCH_NORMUTIN_SCALAR:  /* Normalized Mutual Information */
      val = THD_norm_mutinf_scl( gstup->npt_match ,
                                 gstup->ajbot , gstup->ajclip , avm ,
                                 gstup->bsbot , gstup->bsclip , bvm , wvm ) ;
    break ;

    case GA_MATCH_JOINTENT_SCALAR:  /* Joint Entropy */
      val = THD_jointentrop_scl( gstup->npt_match ,
                                 gstup->ajbot , gstup->ajclip , avm ,
                                 gstup->bsbot , gstup->bsclip , bvm , wvm ) ;
    break ;

    case GA_MATCH_CRAT_USYM_SCALAR: /* Correlation ratio (various flavors) */
    case GA_MATCH_CRAT_SADD_SCALAR:
    case GA_MATCH_CORRATIO_SCALAR:
      /* first select the symmetrization mode, then compute */
           if( meth==GA_MATCH_CRAT_USYM_SCALAR )THD_corr_ratio_sym_not;
      else if( meth==GA_MATCH_CRAT_SADD_SCALAR )THD_corr_ratio_sym_add;
      else                                      THD_corr_ratio_sym_mul;

#if 0
      if( mverb > 8 )
       INFO_message("THD_corr_ratio_scl(%d,%g,%g,bvm,%g,%g,bvm,wvm)",
                     gstup->npt_match ,
                     gstup->bsbot , gstup->bsclip ,
                     gstup->ajbot , gstup->ajclip  ) ;
#endif

      val = THD_corr_ratio_scl( gstup->npt_match ,
                                gstup->bsbot , gstup->bsclip , bvm ,
                                gstup->ajbot , gstup->ajclip , avm , wvm ) ;
      val = 1.0 - fabs(val) ;
    break ;

    case GA_MATCH_HELLINGER_SCALAR: /* Hellinger metric */
      val = -THD_hellinger_scl( gstup->npt_match ,
                                gstup->ajbot , gstup->ajclip , avm ,
                                gstup->bsbot , gstup->bsclip , bvm , wvm ) ;
    break ;

    /* 24 Feb 2010: a combined method, using multiple functionals */

    case GA_MATCH_LPC_MICHO_SCALAR:{
      val = (double)GA_pearson_local( gstup->npt_match, avm, bvm,wvm ) ;

      /* add the weighted Hellinger/MI/NMI/CR(A) extras, if any are enabled */
      if( micho_hel != 0.0 || micho_mi  != 0.0 ||
          micho_nmi != 0.0 || micho_crA != 0.0   ){
        float_quad hmc ; float ovv ;
        hmc = THD_helmicra_scl( gstup->npt_match ,
                                gstup->ajbot , gstup->ajclip , avm ,
                                gstup->bsbot , gstup->bsclip , bvm , wvm ) ;
        val += -micho_hel * hmc.a - micho_mi  * hmc.b
               +micho_nmi * hmc.c + micho_crA * (1.0-fabs(hmc.d)) ;

        /* optional penalty for poor mask overlap */
        if( micho_ov != 0.0 && gstup->bsmask != NULL && gstup->ajmask != NULL ){
          ovv = GA_get_warped_overlap_fraction() ;
          ovv = MAX(0.0f,9.95f-10.0f*ovv) ;
          val += micho_ov * ovv*ovv ;
        }
      }
    }
    break ;

    /* 28 Nov 2018: combined method, but for lpa instead of lpc [RWC] */

    case GA_MATCH_LPA_MICHO_SCALAR:{
      val = (double)GA_pearson_local( gstup->npt_match, avm, bvm,wvm ) ;
      val = 1.0 - fabs(val) ;

      if( micho_hel != 0.0 || micho_mi  != 0.0 ||
          micho_nmi != 0.0 || micho_crA != 0.0   ){
        float_quad hmc ; float ovv ;
        hmc = THD_helmicra_scl( gstup->npt_match ,
                                gstup->ajbot , gstup->ajclip , avm ,
                                gstup->bsbot , gstup->bsclip , bvm , wvm ) ;
        val += -micho_hel * hmc.a - micho_mi  * hmc.b
               +micho_nmi * hmc.c + micho_crA * (1.0-fabs(hmc.d)) ;

        if( micho_ov != 0.0 && gstup->bsmask != NULL && gstup->ajmask != NULL ){
          ovv = GA_get_warped_overlap_fraction() ;
          ovv = MAX(0.0f,9.95f-10.0f*ovv) ;
          val += micho_ov * ovv*ovv ;
        }
      }
    }
    break ;

  }

  if( !isfinite(val) ) val = BIGVAL ;  /* NaN/Inf => very bad fit value */
  RETURN( val );
}
714 
715 /*---------------------------------------------------------------------------*/
716 /*! Fit metric for matching base and target image value pairs.
717     (Smaller is a better match.)
718     For use as a NEWUOA optimization function.
719 *//*-------------------------------------------------------------------------*/
720 
double GA_scalar_fitter( int npar , double *mpar )
{
  double val ;
  float *avm , *bvm , *wvm ;

ENTRY("GA_scalar_fitter") ;

  avm = (float *)calloc(gstup->npt_match,sizeof(float)) ; /* target points at */
  GA_get_warped_values( npar , mpar , avm ) ;             /* warped locations */

  bvm = gstup->bvm->ar ;                                  /* base points */
  wvm = (gstup->wvm != NULL) ? gstup->wvm->ar : NULL ;    /* weights */

  /* (re)build the 2D histogram parameters if marked stale */
  if( gstup->need_hist_setup ) GA_setup_2Dhistogram( avm , bvm ) ;

  /* evaluate the cost function! */

  val = GA_scalar_costfun( gstup->match_code, gstup->npt_match, avm,bvm,wvm ) ;

  free((void *)avm) ;    /* toss the trash */

  /** the code below implements the -round option of 3dAllineate **/

#if 1
#define VBASE 2097152.0   /* 2^21 = keep 21 bits of precision in val */
  if( mround && val != 0.0 && isfinite(val) ){
    double vvv ; int eee ;

    /* x=frexp(val,&e) return x,e such that 0.5 <= x < 1 and val = x * 2^e;  */
    /* so 0.5*VBASE <= x*VBASE < VBASE;                                      */
    /* so round() will be the nearest integer between 0.5*VBASE and VBASE;   */
    /* that is, between 2^20 and 2^21, which is to say a 21 bit integer;     */
    /* so we scale it back down by VBASE to make it between 0.5 and 1 again. */

    vvv = round( VBASE * frexp(val,&eee) ) / VBASE ;

    /* And then we reassemble it back to a floating point number with the    */
    /* exponent in eee, using the scalbn() function -- see how easy it is!?  */

    val = scalbn(vvv,eee) ;

    /** Why this rigmarole? To test if keeping a few bits less precision
        would make the results on different systems more similar, since
        the effects of round-off error in the costfun evaluation would
        be reduced. However, this test failed -- rounding didn't help :( **/
  }
#undef VBASE
#endif

#if 1
  if( mverb > 1 ){ /* print stuff out at each evaluation [masochistic] */
    static double vsmall=1.e+37 ; static int ncall=0 ;
    if( vsmall > val ){              /* a new best value was found */
      if( ncall > 0 ){
        if( mverb == 2 ) fprintf(stderr,"*") ;
        else             fprintf(stderr,"*[#%d=%.6g] ",ncall,val) ;
        mpr++ ;
      }
      vsmall = val ;
    } else if( mverb > 6 ){          /* super-verbose: print every call */
                         fprintf(stderr," [#%d=%.6g] ",ncall,val) ; mpr++ ;
    }
    ncall++ ;
  }
#endif

  RETURN(val);
}
789 
790 /*---------------------------------------------------------------------------*/
791 /* Clear the weight */
792 
void mri_genalign_scalar_clrwght( GA_setup *stup )  /* 18 Oct 2006 */
{
ENTRY("mri_genalign_scalar_clrwght") ;
   if( stup == NULL ) EXRETURN ;            /* nothing to clear */

   /* release the weight image and byte mask, then reset the bookkeeping */

   if( stup->bwght != NULL ) mri_free(stup->bwght) ;
   if( stup->bmask != NULL ) free((void *)stup->bmask) ;

   stup->bwght = NULL ;
   stup->bmask = NULL ;
   stup->nmask = stup->nvox_mask = 0 ;
   EXRETURN ;
}
804 
805 /*---------------------------------------------------------------------------*/
806 /* Macro for exiting mri_genalign_scalar_setup() with extreme prejudice.     */
807 
/* NOTE: ERREX can only be used inside functions that use the ENTRY/EXRETURN
   tracing macros (void functions), since its expansion ends with EXRETURN. */
#undef  ERREX
#define ERREX(s) \
 do{ ERROR_message("mri_genalign_scalar_setup: %s",(s)); EXRETURN; } while(0)
811 
812 /*---------------------------------------------------------------------------*/
813 /*! Setup for generic alignment of scalar images.
814     - If this is a new alignment, images basim and targim must be input;
815       wghtim is optional.
816     - If this is a continuation of a previously started alignment
817       with modified parameters (e.g., smoothing method/radius), then image
818       copies are already stored in stup and don't need to be resupplied.
819     - cf. 3dAllineate.c for use of this function!
820 *//*-------------------------------------------------------------------------*/
821 
void mri_genalign_scalar_setup( MRI_IMAGE *basim  , MRI_IMAGE *wghtim ,
                                MRI_IMAGE *targim , GA_setup  *stup    )

{
   int qq , rr , nx,ny,nz,nxy , mm,ii,jj,kk , qdim ;
   int use_all=0 , need_pts=0 , nmatch ;
   int need_smooth_base , do_smooth_base ;
   int need_smooth_targ , do_smooth_targ , got_new_targ=0 ;
   float *bsar ;

ENTRY("mri_genalign_scalar_setup") ;

   /* basic checks of input for rationality:
      - Must have stup struct (setup parameters)
      - Must have new base image (basim) or have it previously stored in stup
      - Must have new target image (targim) or have previously stored version */

   if( mverb > 1 ) ININFO_message("* Enter alignment setup routine") ;

   if( stup == NULL ) ERREX("stup is NULL") ;
   stup->setup = 0 ;  /* mark stup struct as not being ready yet */

   if( basim  == NULL && stup->bsim == NULL ) ERREX("basim is NULL") ;
   if( targim == NULL && stup->ajim == NULL ) ERREX("targim is NULL") ;

   /* check dimensionality of input images (2D or 3D) */

   if( basim != NULL ){
     qdim = mri_dimensionality(basim) ;
     if( qdim < 2 || qdim > 3 )
       ERREX("basim dimensionality is not 2 or 3") ;
   } else {
     qdim = mri_dimensionality(stup->bsim) ;
   }
   stup->abdim = qdim ;

   if( targim != NULL ){
     if( qdim != mri_dimensionality(targim) )
       ERREX("basim & targim dimensionalities differ") ;
   }

   if( stup->wfunc_numpar < 1 || stup->wfunc==NULL || stup->wfunc_param==NULL )
     ERREX("illegal wfunc parameters") ;

   stup->dim_avec = stup->dim_bvec = 1 ;  /* scalars */

   /** copy new images into setup struct **/

   if( basim != NULL ){
STATUS("copy basim") ;
     need_pts = 1 ;              /* will need to extract match points */
     if( stup->bsim != NULL ) mri_free(stup->bsim) ;
     if( mverb > 1 ) ININFO_message("- copying base image") ;
     stup->bsim = mri_to_float(basim) ;
     /* non-positive grid spacings are nonsense: default to 1 mm */
     if( stup->bsim->dx <= 0.0f ) stup->bsim->dx = 1.0f ;
     if( stup->bsim->dy <= 0.0f ) stup->bsim->dy = 1.0f ;
     if( stup->bsim->dz <= 0.0f ) stup->bsim->dz = 1.0f ;

     /* and set the index-to-coordinate matrix cmat
        (default = diagonal scaling, origin at the grid center) */
     if( !ISVALID_MAT44(stup->base_cmat) ){
       LOAD_DIAG_MAT44( stup->base_cmat ,
                        stup->bsim->dx , stup->bsim->dy , stup->bsim->dz ) ;
       LOAD_MAT44_VEC( stup->base_cmat ,
                       -(stup->bsim->nx-1)*0.5f*stup->bsim->dx ,
                       -(stup->bsim->ny-1)*0.5f*stup->bsim->dy ,
                       -(stup->bsim->nz-1)*0.5f*stup->bsim->dz  ) ;
     }
     stup->base_imat = MAT44_INV( stup->base_cmat ) ;
     stup->base_di = MAT44_COLNORM(stup->base_cmat,0) ;  /* 22 Aug 2007 */
     stup->base_dj = MAT44_COLNORM(stup->base_cmat,1) ;
     stup->base_dk = MAT44_COLNORM(stup->base_cmat,2) ;
     /* any previously smoothed copy of the base is now stale */
     if( stup->bsims != NULL ){ mri_free(stup->bsims); stup->bsims = NULL; }
   }
   nx = stup->bsim->nx; ny = stup->bsim->ny; nz = stup->bsim->nz; nxy = nx*ny;

   if( targim != NULL ){
STATUS("copy targim") ;
     if( stup->ajim != NULL ) mri_free(stup->ajim) ;
     if( mverb > 1 ) ININFO_message("- copying source image") ;
     stup->ajim = mri_to_float(targim) ;
     if( stup->ajim->dx <= 0.0f ) stup->ajim->dx = 1.0f ;
     if( stup->ajim->dy <= 0.0f ) stup->ajim->dy = 1.0f ;
     if( stup->ajim->dz <= 0.0f ) stup->ajim->dz = 1.0f ;

     /* index-to-coordinate matrix for the target, same default as base */
     if( !ISVALID_MAT44(stup->targ_cmat) ){
       LOAD_DIAG_MAT44( stup->targ_cmat ,
                        stup->ajim->dx , stup->ajim->dy , stup->ajim->dz ) ;
       LOAD_MAT44_VEC( stup->targ_cmat ,
                       -(stup->ajim->nx-1)*0.5f*stup->ajim->dx ,
                       -(stup->ajim->ny-1)*0.5f*stup->ajim->dy ,
                       -(stup->ajim->nz-1)*0.5f*stup->ajim->dz  ) ;
     }
     stup->targ_imat = MAT44_INV( stup->targ_cmat ) ;
     stup->targ_di = MAT44_COLNORM(stup->targ_cmat,0) ;  /* 22 Aug 2007 */
     stup->targ_dj = MAT44_COLNORM(stup->targ_cmat,1) ;
     stup->targ_dk = MAT44_COLNORM(stup->targ_cmat,2) ;

     /* stale smoothed/original copies of the target are discarded */
     if( stup->ajims  != NULL ){ mri_free(stup->ajims) ; stup->ajims  = NULL; }
     if( stup->ajimor != NULL ){ mri_free(stup->ajimor); stup->ajimor = NULL; }

     if( stup->ajmask != NULL && stup->ajmask->nvox != stup->ajim->nvox ){
       WARNING_message("mri_genalign_scalar_setup: target image/mask mismatch") ;
       mri_free(stup->ajmask) ; stup->ajmask = NULL ;
     }

     /* if filling the outside of the target mask with random junk */

     if( stup->ajmask != NULL && stup->ajmask_ranfill ){ /* set aj_u??? variables for mask noise */
       /* NOTE(review): ubot,usiz,u1,u2 declared below are unused in this
          scope (the actual fill happens later, after smoothing) */
       float *af, *qf ; byte *mmm ; float_pair quam ; float ubot,usiz , u1,u2;
       MRI_IMAGE *qim ; int nvox,pp ;
       stup->ajimor = mri_copy(stup->ajim) ;  /* backup of original image */
       if( stup->usetemp ) mri_purge( stup->ajimor ) ;  /* save for later */
       af  = MRI_FLOAT_PTR(stup->ajim) ;
       mmm = MRI_BYTE_PTR (stup->ajmask) ;
       qim = mri_new_conforming(stup->ajim,MRI_float);
       qf  = MRI_FLOAT_PTR(qim); nvox = qim->nvox;
       /* collect the in-mask values, then take the 7%/93% quantiles
          to define the range of the noise to be injected later */
       for( ii=pp=0 ; ii < nvox ; ii++ ){ if( mmm[ii] ) qf[pp++] = af[ii] ; }
       qim->nvox = pp; quam = mri_twoquantiles(qim,0.07f,0.93f); mri_free(qim);
       stup->aj_ubot = quam.a ; stup->aj_usiz = (quam.b - quam.a)*0.5f ;
     } else {
       stup->aj_ubot = stup->aj_usiz = 0.0f ;
     }
     got_new_targ = 1 ;
   } /* end of processing input targim */

   /* smooth images if needed
      13 Oct 2006: separate smoothing radii for base and target */

   need_smooth_base = (stup->smooth_code > 0 && stup->smooth_radius_base > 0.0f);
   need_smooth_targ = (stup->smooth_code > 0 && stup->smooth_radius_targ > 0.0f);
   /* re-smooth only if the smoothing spec changed or no smoothed copy exists */
   do_smooth_base   = need_smooth_base &&
                     ( stup->smooth_code        != stup->old_sc      ||
                       stup->smooth_radius_base != stup->old_sr_base ||
                       stup->bsims              == NULL                ) ;
   do_smooth_targ   = need_smooth_targ &&
                     ( stup->smooth_code        != stup->old_sc      ||
                       stup->smooth_radius_targ != stup->old_sr_targ ||
                       stup->ajims              == NULL                ) ;
   stup->old_sc      = stup->smooth_code   ;
   stup->old_sr_base = stup->smooth_radius_base ;
   stup->old_sr_targ = stup->smooth_radius_targ ;

   if( !need_smooth_base ){
     if( stup->bsims != NULL ){ mri_free(stup->bsims); stup->bsims = NULL; }
     stup->old_sr_base = -1.0f ;
     mri_unpurge(stup->bsim) ; /* 20 Dec 2006 */
   }
   if( !need_smooth_targ ){
     if( stup->ajims != NULL ){ mri_free(stup->ajims); stup->ajims = NULL; }
     stup->old_sr_targ = -1.0f ;
     mri_unpurge(stup->ajim) ; /* 20 Dec 2006 */
   }
   if( do_smooth_base ){
STATUS("smooth base") ;
     if( stup->bsims != NULL ) mri_free(stup->bsims);
     if( mverb > 1 )
       ININFO_message("- Smoothing base; radius=%.2f",stup->smooth_radius_base);
     stup->bsims = GA_smooth( stup->bsim , stup->smooth_code ,
                                           stup->smooth_radius_base ) ;
     if( stup->usetemp ) mri_purge( stup->bsim ) ;  /* 20 Dec 2006 */
   }
   if( do_smooth_targ ){
STATUS("smooth targ") ;
     if( stup->ajims != NULL ) mri_free(stup->ajims);
     if( mverb > 1 )
       ININFO_message("- Smoothing source; radius=%.2f",stup->smooth_radius_targ);
     stup->ajims = GA_smooth( stup->ajim , stup->smooth_code ,
                                           stup->smooth_radius_targ ) ;
     if( stup->usetemp ) mri_purge( stup->ajim ) ;  /* 20 Dec 2006 */
     got_new_targ = 1 ;
   }

   /* 07 Aug 2007: deal with target mask, if any [moved here 28 Aug 2008] */
   /* that is, add noise to non-mask parts of the image, AFTER smoothing */

   if( got_new_targ && stup->ajmask != NULL && stup->aj_usiz > 0.0f && stup->ajmask_ranfill ){
     float ubot=stup->aj_ubot , usiz=stup->aj_usiz , u1,u2 ;
     float *af ; byte *mmm ; int nvox ;
     /* operate on the smoothed target if it exists, else the raw copy */
     if( stup->ajims != NULL ) af = MRI_FLOAT_PTR(stup->ajims) ;
     else                      af = MRI_FLOAT_PTR(stup->ajim ) ;
     mmm = MRI_BYTE_PTR(stup->ajmask) ;
     nvox = stup->ajmask->nvox ;
     if( mverb > 2 ) ININFO_message("!source mask fill: ubot=%g usiz=%g",ubot,usiz);
     myunif_reset(1234567890) ;  /* to get the same numbers every time */
     for( ii=0 ; ii < nvox ; ii++ ){  /* fill non-mask voxels with noise */
       if( !mmm[ii] ){ u1 = myunif(); u2 = myunif(); af[ii] = ubot + usiz*(u1+u2); }
     }
   }

   /* get min and max values in base and target images
      (use the smoothed versions when they exist) */

STATUS("get min/max") ;
   if( stup->ajims == NULL ){
     stup->ajbot = (float)mri_min(stup->ajim) ;
     stup->ajtop = (float)mri_max(stup->ajim) ;
     if( stup->ajbot >= 0.0f ) stup->ajclip = mri_topclip(stup->ajim) ;
     else                      stup->ajclip = stup->ajtop ;
   } else {
     stup->ajbot = (float)mri_min(stup->ajims) ;
     stup->ajtop = (float)mri_max(stup->ajims) ;
     if( stup->ajbot >= 0.0f ) stup->ajclip = mri_topclip(stup->ajims) ;
     else                      stup->ajclip = stup->ajtop ;
   }

   if( stup->bsims == NULL ){
     stup->bsbot = (float)mri_min(stup->bsim) ;
     stup->bstop = (float)mri_max(stup->bsim) ;
     if( stup->bsbot >= 0.0f ) stup->bsclip = mri_topclip(stup->bsim) ;
     else                      stup->bsclip = stup->bstop ;
   } else {
     stup->bsbot = (float)mri_min(stup->bsims) ;
     stup->bstop = (float)mri_max(stup->bsims) ;
     if( stup->bsbot >= 0.0f ) stup->bsclip = mri_topclip(stup->bsims) ;
     else                      stup->bsclip = stup->bstop ;
   }

   /** load weight and mask arrays **/

   if( wghtim != NULL ){              /*---- have new weight to load ----*/
     MRI_IMAGE *qim ; float *bar , bfac ;

STATUS("load weight and mask") ;
     need_pts = 1 ;
     if( wghtim->nvox != stup->bsim->nvox )
       ERREX("basim and wghtim grids differ!?!") ;

     if( stup->bwght != NULL ) mri_free(stup->bwght) ;
     if( stup->bmask != NULL ) free((void *)stup->bmask) ;

     if( mverb > 1 ) ININFO_message("- copying weight image") ;

     stup->bwght = mri_to_float(wghtim) ; bar = MRI_FLOAT_PTR(stup->bwght) ;
     qim = mri_new_conforming(wghtim,MRI_byte); stup->bmask = MRI_BYTE_PTR(qim);
     bfac = (float)mri_maxabs(stup->bwght) ;
     if( bfac == 0.0f ) ERREX("wghtim is all zero?!?") ;
     bfac = 1.0f / bfac ;
     for( ii=0 ; ii < wghtim->nvox ; ii++ ){   /* scale weight, make mask */
       bar[ii] = MYfabsf(bar[ii])*bfac ;
       stup->bmask[ii] = (bar[ii] != 0.0f) ;
     }

     /* detach the byte array from qim so stup->bmask survives mri_free(qim) */
     mri_fix_data_pointer( NULL , qim ) ; mri_free(qim) ;
     stup->nmask = THD_countmask( wghtim->nvox , stup->bmask ) ;
     if( stup->nmask < 99 ){  /* too few nonzero weights ==> drop the mask */
       WARNING_message("mri_genalign_scalar: illegal input mask") ;
       free(stup->bmask) ;
       stup->bmask = NULL ; stup->nmask = stup->nvox_mask = 0 ;
       mri_free(stup->bwght) ; stup->bwght = NULL ;
     } else {
       stup->nvox_mask = wghtim->nvox ;
     }

   } else if( stup->nmask > 0 ){  /*---- have old mask to check ----*/

     if( stup->nvox_mask != stup->bsim->nvox ){
       WARNING_message("old mask and new base image differ in size") ;
       if( stup->bwght != NULL ) mri_free(stup->bwght) ;
       if( stup->bmask != NULL ) free((void *)stup->bmask) ;
       stup->nmask = stup->nvox_mask = 0 ;
       stup->bmask = NULL ; stup->bwght = NULL ;
     } else if( mverb > 1 )
       ININFO_message("- retaining old weight image") ;

   } else {                           /*---- have no mask, new or old ----*/
     stup->bmask = NULL ;
     stup->bwght = NULL ;
     stup->nmask = stup->nvox_mask = 0 ;
     if( mverb > 1 ) ININFO_message("- no weight image") ;
   }

   /*-- extract matching points from base image (maybe) --*/

   nmatch = stup->npt_match ;
   /* nonsensical request ==> use every voxel */
   if( stup->npt_match <= 9 || stup->npt_match > stup->bsim->nvox ){
     nmatch = stup->bsim->nvox ; use_all = 1 ;
   }
   /* more points requested than mask voxels ==> use every mask voxel */
   if( stup->nmask > 0 && stup->npt_match > stup->nmask ){
     nmatch = stup->nmask ; use_all = 2 ;
   }

   if( stup->im == NULL || stup->im->nar != nmatch || stup->npt_match != nmatch )
     need_pts = 1 ;

   if( need_pts ){      /* extract points from base image? */
     MRI_IMAGE *bim ;

     stup->npt_match = nmatch ;
     if( mverb > 1 ) ININFO_message("- using %d points from base image [use_all=%d]",nmatch,use_all) ;

     if( use_all == 1 ){         /*------------- all points, no mask -----------*/

STATUS("need_pts: use_all==1") ;

       /* NULL index vectors signal "use every voxel in order" downstream */
       if( stup->im != NULL ){
         KILL_floatvec(stup->im) ;
         KILL_floatvec(stup->jm) ;
         KILL_floatvec(stup->km) ;
       }
       stup->im = stup->jm = stup->km = NULL ;

     } else if( use_all == 2 ){  /*------------- all points in mask ------------*/

       /* NOTE(review): nvox declared here is never used in this branch */
       int nvox , pp ; byte *mask=stup->bmask ;

STATUS("need_pts: use_all==2") ;

       if( stup->im != NULL ){
         KILL_floatvec(stup->im) ;
         KILL_floatvec(stup->jm) ;
         KILL_floatvec(stup->km) ;
       }
       MAKE_floatvec(stup->im,stup->npt_match) ;
       MAKE_floatvec(stup->jm,stup->npt_match) ;
       MAKE_floatvec(stup->km,stup->npt_match) ;

       /* decode linear index mm into (i,j,k) grid indices for each mask voxel */
       for( mm=pp=0 ; pp < stup->npt_match ; mm++ ){
         if( GOOD(mm) ){
           ii = mm % nx; kk = mm / nxy; jj = (mm-kk*nxy) / nx;
           stup->im->ar[pp] = ii; stup->jm->ar[pp] = jj; stup->km->ar[pp] = kk;
           pp++ ;
         }
       }

     } else {  /*--------------------- a subset of points ----------------------*/

       int nvox,pp,dm , *qm ; byte *mask = stup->bmask ;

STATUS("need_pts: subset") ;

       /* step size dm is relatively prime to nvox, so starting anywhere and
          repeatedly adding dm (mod nvox) visits each voxel exactly once */
       nvox = stup->bsim->nvox ;
       dm   = GA_find_relprime_fixed(nvox) ;
STATUSi("  :: relprime dm",dm) ;
       if( stup->im != NULL ){
         KILL_floatvec(stup->im) ;
         KILL_floatvec(stup->jm) ;
         KILL_floatvec(stup->km) ;
       }
       MAKE_floatvec(stup->im,stup->npt_match) ;
       MAKE_floatvec(stup->jm,stup->npt_match) ;
       MAKE_floatvec(stup->km,stup->npt_match) ;

       qm = (int *)calloc(sizeof(int),stup->npt_match) ;
       if( qm == NULL ) ERREX("qm malloc fails") ;
       mm = (nx/2) + (ny/2)*nx + (nz/2)*nxy ;  /* start at the grid center */
       for( pp=0 ; pp < stup->npt_match ; mm=(mm+dm)%nvox )
         if( GOOD(mm) ) qm[pp++] = mm ;
#if 0
STATUS("  :: sort qm") ;
       qsort_int( stup->npt_match , qm ) ;
#else
       /* approximate sort: improves memory locality of the later lookups
          without paying for a full sort */
       { int cut = (int)(0.5f*sqrtf((float)stup->npt_match)) ;
STATUSi("  :: mostly sort qm; cut",cut) ;
         qsort_int_mostly( stup->npt_match , qm , cut ) ;
       }
#endif

STATUS("  :: load index arrays") ;
       for( pp=0 ; pp < stup->npt_match ; pp++ ){
         mm = qm[pp] ;
         ii = mm % nx; kk = mm / nxy; jj = (mm-kk*nxy) / nx;
         stup->im->ar[pp] = ii; stup->jm->ar[pp] = jj; stup->km->ar[pp] = kk;
       }
       free((void *)qm) ;
     }

     /*------------- extract values from base image for matching -------------*/

STATUS("extract from base") ;

     KILL_floatvec(stup->bvm) ; KILL_floatvec(stup->wvm) ;
     bim = (stup->bsims != NULL ) ? stup->bsims : stup->bsim ;
     bsar = MRI_FLOAT_PTR(bim) ;
     MAKE_floatvec(stup->bvm,stup->npt_match) ;
     if( stup->bwght == NULL ) stup->wvm = NULL ;
     else                      MAKE_floatvec(stup->wvm,stup->npt_match) ;
     if( stup->im == NULL ){  /* use_all==1: points are just voxels in order */
       memcpy( stup->bvm->ar , bsar , sizeof(float)*stup->npt_match ) ;
       if( stup->wvm != NULL )
         memcpy( stup->wvm->ar , MRI_FLOAT_PTR(stup->bwght) ,
                                 sizeof(float)*stup->npt_match ) ;
     } else {                 /* gather values at the selected (i,j,k) points */
       for( qq=0 ; qq < stup->npt_match ; qq++ ){
         rr = (int)(stup->im->ar[qq] + stup->jm->ar[qq]*nx + stup->km->ar[qq]*nxy) ;
         stup->bvm->ar[qq] = bsar[rr] ;
       }
       if( stup->bwght != NULL ){
         bsar = MRI_FLOAT_PTR(stup->bwght) ;
         for( qq=0 ; qq < stup->npt_match ; qq++ ){
           rr = (int)(stup->im->ar[qq] + stup->jm->ar[qq]*nx + stup->km->ar[qq]*nxy) ;
           stup->wvm->ar[qq] = bsar[rr] ;
         }
       }
     }
     if( stup->usetemp ){
       mri_purge(stup->bwght) ;  /* 21 Dec 2006 */
       mri_purge(bim) ;          /* 22 Dec 2006 */
     }

     /* do match_code specific pre-processing of the extracted data */

#if 0
     switch( stup->match_code ){
       case GA_MATCH_SPEARMAN_SCALAR:
         STATUS("doing spearman_rank_prepare") ;
         stup->bvstat = spearman_rank_prepare( stup->npt_match, stup->bvm->ar );
       break ;
     }
#endif

   } /* end of if(need_pts) */

   if( stup->blokset != NULL ){                      /* 20 Aug 2007 */
     GA_BLOK_KILL(stup->blokset) ;
     stup->blokset = NULL ;   /* will be re-created later if needed */
   }

   stup->need_hist_setup = 1 ;   /* 08 May 2007 */

   if( mverb > 1 ) ININFO_message("* Exit alignment setup routine") ;
   stup->setup = SMAGIC ;  /* mark the struct as ready for use */
   EXRETURN ;
}
1244 
1245 /*---------------------------------------------------------------------------*/
1246 /*! Set the byte mask for the target (or unset it).
1247     Should be done BEFORE mri_genalign_scalar_setup().  [07 Aug 2007] */
1248 
void mri_genalign_set_targmask( MRI_IMAGE *im_tmask , GA_setup *stup )
{
   int nmask , nvox ;

ENTRY("mri_genalign_set_targmask") ;

   if( stup == NULL ) EXRETURN ;

   /* throw away any mask stored previously */

   stup->najmask = 0 ;
   if( stup->ajmask != NULL ){ mri_free(stup->ajmask); stup->ajmask = NULL; }

   if( im_tmask == NULL ) EXRETURN ;   /* caller just wanted to un-set it */

   /* a stored target image means setup() already ran -- unusual ordering */

   if( stup->ajim != NULL ){
     if( im_tmask->nvox != stup->ajim->nvox ){
       ERROR_message("mri_genalign_set_targmask: image mismatch!") ;
       EXRETURN ;
     }
     WARNING_message("mri_genalign_set_targmask: called after setup()?!") ;
   }

   stup->ajmask  = mri_to_byte(im_tmask) ;
   nvox          = stup->ajmask->nvox ;
   nmask         = THD_countmask( nvox , MRI_BYTE_PTR(stup->ajmask) ) ;
   stup->najmask = nmask ;

   /* a mask that is both tiny and sparse is useless -- discard it */

   if( nmask < 999 && nmask/(float)nvox < 0.1f ){
     WARNING_message(
      "mri_genalign_set_targmask: mask has %d voxels out of %d total ==> ignored!",
      nmask , nvox ) ;
     mri_free(stup->ajmask) ; stup->ajmask = NULL ; stup->najmask = 0 ;
   } else if( mverb > 2 ) {
     ININFO_message("source mask has %d [out of %d] voxels",nmask,nvox) ;
   }
   EXRETURN ;
}
1279 
1280 /*---------------------------------------------------------------------------*/
1281 /*! Set the byte mask for the base (or unset it).
1282     Should be done BEFORE mri_genalign_scalar_setup().  [25 Feb 2010] */
1283 
void mri_genalign_set_basemask( MRI_IMAGE *im_bmask , GA_setup *stup )
{
   int nmask , nvox ;
ENTRY("mri_genalign_set_basemask") ;
   if( stup == NULL ) EXRETURN ;
   if( stup->bsmask != NULL ){ mri_free(stup->bsmask); stup->bsmask = NULL; }
   stup->nbsmask = 0 ;
   if( im_bmask != NULL ){
     /* [bug fix] this block was copy-pasted from mri_genalign_set_targmask():
        it compared the mask size against the TARGET image (stup->ajim) and
        reported errors under the targmask name; a base mask must match the
        BASE image (stup->bsim), and messages should name this function. */
     if( stup->bsim != NULL ){
       if( im_bmask->nvox != stup->bsim->nvox ){
         ERROR_message("mri_genalign_set_basemask: image mismatch!") ;
         EXRETURN ;
       } else {
         WARNING_message("mri_genalign_set_basemask: called after setup()?!") ;
       }
     }
     stup->bsmask = mri_to_byte(im_bmask) ;
     nvox = stup->bsmask->nvox ;
     stup->nbsmask = nmask = THD_countmask( nvox , MRI_BYTE_PTR(stup->bsmask) ) ;
     /* a mask that is both tiny and sparse is probably a user error */
     if( nmask < 999 && nmask/(float)nvox < 0.09f ){
       WARNING_message(
        "mri_genalign_set_basemask: mask has %d voxels out of %d total ==> ignored!",
        nmask , nvox ) ;
       mri_free(stup->bsmask); stup->bsmask = NULL; stup->nbsmask = 0;
     } else if( mverb > 2 ) {
       ININFO_message("base mask has %d [out of %d] voxels",nmask,nvox) ;
     }
   }
   EXRETURN ;
}
1314 
1315 /*---------------------------------------------------------------------------*/
1316 /*! Setup parameters for optimizing.
1317 *//*-------------------------------------------------------------------------*/
1318 
void GA_param_setup( GA_setup *stup )
{
   int nfree , pp ;

ENTRY("GA_param_setup") ;

   if( stup == NULL || stup->setup != SMAGIC ){
     ERROR_message("Illegal call to GA_param_setup()") ;
     EXRETURN ;
   }

   /* tally the parameters the optimizer is allowed to vary */

   nfree = 0 ;
   for( pp=0 ; pp < stup->wfunc_numpar ; pp++ ){
     if( !stup->wfunc_param[pp].fixed ) nfree++ ;
   }

   stup->wfunc_numfree = nfree ;
   if( nfree == 0 ){
     ERROR_message("No free parameters in GA_param_setup()?"); EXRETURN;
   }

   /* cache each parameter's allowed range width (max-min) for scaling */

   for( pp=0 ; pp < stup->wfunc_numpar ; pp++ )
     stup->wfunc_param[pp].siz = stup->wfunc_param[pp].max
                                -stup->wfunc_param[pp].min ;

   EXRETURN ;
}
1356 
1357 /*---------------------------------------------------------------------------*/
1358 /*! Optimize warping parameters, after doing setup.
1359     Return value is number of optimization functional calls
1360     (if it reaches nstep, then final accuracy of rend was not reached).
1361 *//*-------------------------------------------------------------------------*/
1362 
int mri_genalign_scalar_optim( GA_setup *stup ,
                               double rstart, double rend, int nstep )
{
   double *wpar ;
   int ii , qq , nfunc ;

ENTRY("mri_genalign_scalar_optim") ;

   if( stup == NULL || stup->setup != SMAGIC ){
     ERROR_message("Illegal call to mri_genalign_scalar_optim()") ;
     RETURN(-1) ;
   }

   GA_param_setup(stup) ;
   if( stup->wfunc_numfree <= 0 ) RETURN(-2) ;

   /* copy initial warp parameters into local array wpar,
      scaling to the range 0..1                          */

   RAND_ROUND ;

   wpar = (double *)calloc(sizeof(double),stup->wfunc_numfree) ;
   for( ii=qq=0 ; qq < stup->wfunc_numpar ; qq++ ){
     if( !stup->wfunc_param[qq].fixed ){
       wpar[ii] = ( stup->wfunc_param[qq].val_init
                   -stup->wfunc_param[qq].min    ) / stup->wfunc_param[qq].siz;
       /* fold out-of-range starting values back into [0,1] */
       if( wpar[ii] < 0.0 || wpar[ii] > 1.0 ) wpar[ii] = PRED01(wpar[ii]) ;
       ii++ ;
     }
   }

   gstup = stup ;  /* for global access, in other functions in this file */
   gstup_bk = stup ;

   /* an unreasonably small step budget gets replaced with a big default */
   if( nstep <= 4*stup->wfunc_numfree+5 ) nstep = 6666 ;

        if( rstart >  0.2 ) rstart = 0.2 ;  /* our parameters are */
   else if( rstart <= 0.0 ) rstart = 0.1 ;  /* all in range 0..1 */

   /* final radius must be sensibly smaller than the starting radius */
   if( rend >= 0.9*rstart || rend <= 0.0 ) rend = 0.0666 * rstart ;

   /*** all the real work takes place now ***/

   mpr = 0 ;
   nfunc = powell_newuoa( stup->wfunc_numfree , wpar ,
                          rstart , rend , nstep , GA_scalar_fitter ) ;

   /* one extra evaluation at the optimum, to record the best cost value */
   stup->vbest = GA_scalar_fitter( stup->wfunc_numfree , wpar ) ;

#if 1
  if( mpr > 0 && mverb > 1 ) fprintf(stderr,"\n") ;  /* end the '*' progress line */
#endif

   /* copy+scale output parameter values back to stup struct */

   for( ii=qq=0 ; qq < stup->wfunc_numpar ; qq++ ){
     if( stup->wfunc_param[qq].fixed ){
       stup->wfunc_param[qq].val_out = stup->wfunc_param[qq].val_fixed ;
     } else {
       stup->wfunc_param[qq].val_out = stup->wfunc_param[qq].min
                                      +stup->wfunc_param[qq].siz
                                       *PRED01(wpar[ii]) ;
       ii++ ;
     }
   }

   free((void *)wpar) ;

   RETURN(nfunc) ;
}
1433 
1434 /*---------------------------------------------------------------------------*/
1435 /*! Get the cost function for the given setup. */
1436 /*---------------------------------------------------------------------------*/
1437 
float mri_genalign_scalar_cost( GA_setup *stup , float *parm )
{
   double *wpar , val ;
   int nf , pp ;

ENTRY("mri_genalign_scalar_cost") ;

   if( stup == NULL || stup->setup != SMAGIC ){
     ERROR_message("Illegal call to mri_genalign_scalar_cost()") ;
     RETURN( BIGVAL );
   }

   GA_param_setup(stup) ;
   if( stup->wfunc_numfree <= 0 ) RETURN( BIGVAL );

   /* pack the free parameters into wpar, each mapped into [0,1];
      if parm is NULL, fall back to the stored initial values */

   wpar = (double *)calloc(sizeof(double),stup->wfunc_numfree) ;

   for( nf=pp=0 ; pp < stup->wfunc_numpar ; pp++ ){
     if( stup->wfunc_param[pp].fixed ) continue ;
     val = (parm == NULL) ? stup->wfunc_param[pp].val_init : parm[pp] ;
     wpar[nf] = (val - stup->wfunc_param[pp].min) / stup->wfunc_param[pp].siz;
     if( wpar[nf] < 0.0 || wpar[nf] > 1.0 ) wpar[nf] = PRED01(wpar[nf]) ;
     nf++ ;
   }

   gstup    = stup ;  /* fitter reads the setup via these file globals */
   gstup_bk = stup ;

   val = GA_scalar_fitter( stup->wfunc_numfree , wpar ) ;

   free((void *)wpar) ; RETURN( (float)val );
}
1474 
1475 /*---------------------------------------------------------------------------*/
1476 /*! Return ALL cost functions for a given setup. */
1477 /*---------------------------------------------------------------------------*/
1478 
mri_genalign_scalar_allcosts(GA_setup * stup,float * parm)1479 floatvec * mri_genalign_scalar_allcosts( GA_setup *stup , float *parm )
1480 {
1481    floatvec *costvec ;
1482    double *wpar , val ;
1483    float *avm , *bvm , *wvm ;
1484    int ii , qq , meth ;
1485 
1486 ENTRY("mri_genalign_scalar_allcosts") ;
1487 
1488    if( stup == NULL || stup->setup != SMAGIC ){
1489      ERROR_message("Illegal call to mri_genalign_scalar_allcosts()") ;
1490      RETURN(NULL) ;
1491    }
1492 
1493    GA_param_setup(stup) ;
1494    if( stup->wfunc_numfree <= 0 ) RETURN(NULL);
1495 
1496    /* copy initial warp parameters into local array wpar,
1497       scaling to the range 0..1                          */
1498 
1499    wpar = (double *)calloc(sizeof(double),stup->wfunc_numfree) ;
1500    for( ii=qq=0 ; qq < stup->wfunc_numpar ; qq++ ){
1501      if( !stup->wfunc_param[qq].fixed ){
1502        val = (parm == NULL) ? stup->wfunc_param[qq].val_init : parm[qq] ;
1503        wpar[ii] = (val - stup->wfunc_param[qq].min) / stup->wfunc_param[qq].siz;
1504        if( wpar[ii] < 0.0 || wpar[ii] > 1.0 ) wpar[ii] = PRED01(wpar[ii]) ;
1505        ii++ ;
1506      }
1507    }
1508 
1509    gstup = stup ;
1510    gstup_bk = stup ;
1511 
1512    avm = (float *)calloc(stup->npt_match,sizeof(float)) ; /* target points at */
1513    GA_get_warped_values( stup->wfunc_numfree,wpar,avm ) ; /* warped locations */
1514 
1515    bvm = stup->bvm->ar ;                                 /* base points */
1516    wvm = (stup->wvm != NULL) ? stup->wvm->ar : NULL ;    /* weights */
1517 
1518    GA_setup_2Dhistogram( avm , bvm ) ;
1519    MAKE_floatvec( costvec , GA_MATCH_METHNUM_SCALAR ) ;
1520 
1521    for( meth=1 ; meth <= GA_MATCH_METHNUM_SCALAR ; meth++ )
1522      costvec->ar[meth-1] = GA_scalar_costfun( meth, stup->npt_match, avm,bvm,wvm ) ;
1523 
1524    free((void *)wpar); free((void *)avm);    /* toss the trash */
1525    RETURN(costvec) ;
1526 }
1527 
1528 /*---------------------------------------------------------------------------*/
1529 /*! Return an image of the local correlations [25 Jan 2021 - RWC] */
1530 /*---------------------------------------------------------------------------*/
1531 
MRI_IMAGE * mri_genalign_map_pearson_local( GA_setup *stup , float *parm )
{
   double *wpar , val ;
   float *avm , *bvm , *wvm ;
   int ii , qq  ;
   floatvec *pvec ; MRI_IMAGE *pim ;

ENTRY("mri_genalign_map_pearson_local") ;

   /* check for malignancy: need a valid, fully setup struct */

   if( stup == NULL || stup->setup != SMAGIC ) RETURN(NULL) ;

   GA_param_setup(stup) ;
   if( stup->wfunc_numfree <= 0 ) RETURN(NULL);

   if( stup->blokset == NULL ){  /* Create blokset if not done before */
     float rad=stup->blokrad , mrad ; float *ima=NULL,*jma=NULL,*kma=NULL ;
     /* enlarge blok radius to account for smoothing, if any was applied */
     if( stup->smooth_code > 0 && stup->smooth_radius_base > 0.0f )
       rad = sqrt( rad*rad + SQR(stup->smooth_radius_base) ) ;
     /* enforce a minimum radius proportional to the base voxel sizes */
     mrad = 1.2345f*(stup->base_di + stup->base_dj + stup->base_dk) ;
     rad  = MAX(rad,mrad) ;
     if( stup->im != NULL ) ima = stup->im->ar ;
     if( stup->jm != NULL ) jma = stup->jm->ar ;
     if( stup->km != NULL ) kma = stup->km->ar ;
     stup->blokset = create_GA_BLOK_set(  /* cf. mri_genalign_util.c */
                            stup->bsim->nx, stup->bsim->ny, stup->bsim->nz,
                            stup->base_di , stup->base_dj , stup->base_dk ,
                            stup->npt_match , ima,jma,kma ,
                            stup->bloktype , rad , stup->blokmin , 1.0f,mverb ) ;
   }

/** INFO_message("mri_genalign_map_pearson_local: input parameters") ; **/

   /* copy initial warp parameters into local wpar, scaling to range 0..1 ;
      if parm==NULL, the val_init values from the setup struct are used   */

   wpar = (double *)calloc(sizeof(double),stup->wfunc_numfree) ;
   for( ii=qq=0 ; qq < stup->wfunc_numpar ; qq++ ){
     if( !stup->wfunc_param[qq].fixed ){
       val = (parm == NULL) ? stup->wfunc_param[qq].val_init : parm[qq] ;
       wpar[ii] = (val - stup->wfunc_param[qq].min) / stup->wfunc_param[qq].siz;
       /* PRED01 = periodic reduction into [0,1] for out-of-range values */
       if( wpar[ii] < 0.0 || wpar[ii] > 1.0 ) wpar[ii] = PRED01(wpar[ii]) ;
/** ININFO_message("  %g  %g",val,wpar[ii]) ; **/
       ii++ ;
     }
   }
   if( ii == 0 ){ free(wpar) ; RETURN(NULL) ; } /* should never happen */

   gstup    = stup ; /* I don't know if/why these statements are needed. */
   gstup_bk = stup ; /* But I copied them from somewhere else to be safe */

   avm = (float *)calloc(stup->npt_match,sizeof(float)) ; /* target points at */
   GA_get_warped_values( stup->wfunc_numfree,wpar,avm ) ; /* warped locations */

   bvm = stup->bvm->ar ;                                 /* base points */
   wvm = (stup->wvm != NULL) ? stup->wvm->ar : NULL ;    /* weights */

   /* get local correlation in each blok between avm and bvm, weighted by wvm */

   pvec = GA_pearson_vector( stup->blokset , avm , bvm , wvm ) ;

/** ININFO_message("Pearson vector") ;
for( ii=0 ; ii < pvec->nar ; ii++ ){
   fprintf(stderr,"   %g %d",pvec->ar[ii],stup->blokset->nelm[ii]) ;
   if( ii%5 == 0 && ii > 0 ) fprintf(stderr,"\n") ;
}
fprintf(stderr,"\n") ; **/

   /* push these per-blok correlations into an image */

   pim = GA_pearson_image( stup , pvec ) ;

   KILL_floatvec(pvec) ; free(wpar) ;  /* tosso the trasholo */

   RETURN(pim) ;
}
1608 
1609 /*---------------------------------------------------------------------------*/
1610 /*! Test some random starting points.  Sets val_init values in stup.
1611     This is the first stage of the 3dAllineate coarse alignment phase.
1612 *//*-------------------------------------------------------------------------*/
1613 
void mri_genalign_scalar_ransetup( GA_setup *stup , int nrand )
{
   double *wpar, *spar , val , vbest , *bpar , *qpar,*cpar , dist ;
   int ii , qq , twof , ss , nfr , icod , nt=0 , ngood ;
#define NKEEP (3*PARAM_MAXTRIAL+1)
   double *kpar[NKEEP] , kval[NKEEP] , qval[NKEEP] ;
   int nk,kk,jj, ngrid,ngtot , maxstep ;
   int ival[NKEEP] , rval[NKEEP] , neval[NKEEP] ; float fval[NKEEP] ;
   int idx[NKEEP] ;
   char mrk[6]="*o+-." ;  /* progress markers for the 5 best slots */

ENTRY("mri_genalign_scalar_ransetup") ;

   if( stup == NULL || stup->setup != SMAGIC ){
     ERROR_message("Illegal call to mri_genalign_scalar_ransetup()") ;
     EXRETURN ;
   }
   if( nrand < NKEEP ) nrand = NKEEP+13 ;  /* need at least NKEEP trials */

   GA_param_setup(stup) ; gstup = stup ; gstup_bk = stup ;
   if( stup->wfunc_numfree <= 0 ) EXRETURN ;

   nfr = stup->wfunc_numfree ;    /* number of free parameters */
   switch( nfr ){                 /* set ngrid = number of points along */
     case 1: ngrid = 9 ; break ;  /*             parameter axis to scan */
     case 2: ngrid = 8 ; break ;  /*             in the regular grid    */
     case 3: ngrid = 6 ; break ;
     case 4: ngrid = 4 ; break ;
     case 5: ngrid = 3 ; break ;
     case 6: ngrid = 2 ; break ;
    default: ngrid = 1 ; break ;
   }
   for( ngtot=1,qq=0 ; qq < nfr ; qq++ ) ngtot *= ngrid ; /* ngrid^nfr */
   if( nfr < 4 ) nrand *=2 ;  /* cheap case ==> do more random trials */

   icod = stup->interp_code ;                     /* save and set interp mode */
   if( icod == MRI_NN || AFNI_noenv("AFNI_TWOPASS_LIN") )
     stup->interp_code = MRI_NN ;                            /* NN if desired */
   else
     stup->interp_code = MRI_LINEAR ;                       /* default=linear */

   wpar = (double *)calloc(sizeof(double),nfr) ;
   spar = (double *)calloc(sizeof(double),nfr) ;
   for( kk=0 ; kk < NKEEP ; kk++ )                     /* keep best NKEEP */
     kpar[kk] = (double *)calloc(sizeof(double),nfr) ; /* parameters sets */

   /* evaluate the middle of the allowed parameter range, and save it */
   RAND_ROUND ;
   for( qq=0 ; qq < nfr ; qq++ ) wpar[qq] = 0.5 ;
   mpr = 0 ;
   val = GA_scalar_fitter( nfr , wpar ) ;           /* eval cost functional */
   memcpy(kpar[0],wpar,sizeof(double)*nfr) ;        /* saved parameters */
   kval[0]  = val ;                                 /* saved cost functional */
   rval[0]  = 0 ;                                   /* not random */
   for( kk=1 ; kk < NKEEP ; kk++ ){
     rval[kk] = 0 ; kval[kk] = BIGVAL ;             /* all these are worse */
   }

   /* try some random places, keep the best NKEEP of them */

   twof = 1 << nfr ;  /* 2^nfr = number of reflections per trial point */

   if( mverb > 1 ) ININFO_message("- number of free params = %d",nfr) ;
   if( mverb )     fprintf(stderr," + - Test (%d+%d)*%d params [top5=%s]:#",ngtot,nrand,twof,mrk) ;

   /* this ensures that the same random trials will be used on all systems */
   myunif_reset(3456789) ;  /* 27 Aug 2008 */

   for( ii=0 ; ii < nrand+ngtot ; ii++ ){
     if( ii < ngtot ){                     /* regular grid points */
       double kp ;
       val = 0.5/(ngrid+1.0) ; ss = ii ;   /* in parameter space */
       for( qq=0 ; qq < nfr ; qq++ ){      /* ss = number in base ngrid */
         kk = ss % ngrid; ss = ss / ngrid; /* kk = digit for axis qq */
         kp = (kk==0) ? 0.5 : (kk+1.0) ; wpar[qq] = 0.5+kp*val;
       }
     } else {                              /* pseudo-random */
       if( mverb && ii == ngtot ) fprintf(stderr,"$") ;
       /* myunif() in [0,1) ==> wpar[qq] in [0.525,0.975); reflections
          below cover the other side of the 0.5 centre point */
       for( qq=0 ; qq < nfr ; qq++ ) wpar[qq] = 0.5*(1.05+0.90*myunif()) ;
     }

     for( ss=0 ; ss < twof ; ss++ ){   /* try divers reflections about */
       for( qq=0 ; qq < nfr ; qq++ )   /* centre of [0,1] in all nfr axes */
         spar[qq] = (ss & (1<<qq)) ? 1.0-wpar[qq] : wpar[qq] ;

       val = GA_scalar_fitter( nfr , spar ) ;       /* get cost functional */
       /* insertion into the sorted keep-list: find slot kk, push the
          worse entries down one place, store the new one at kk */
       for( kk=0 ; kk < NKEEP ; kk++ ){     /* find if this is better than */
         if( val < kval[kk] ){              /* something we've seen so far */
           for( jj=NKEEP-2 ; jj >= kk ; jj-- ){  /* push those above kk up */
             memcpy( kpar[jj+1] , kpar[jj] , sizeof(double)*nfr ) ;
             kval[jj+1] = kval[jj]; rval[jj+1] = rval[jj];
           }
           memcpy( kpar[kk] , spar , sizeof(double)*nfr ) ;   /* save what */
           kval[kk]  = val ;                              /* we just found */
           rval[kk]  = (ii >= ngtot) ;            /* is this a random set? */
           if( mverb && kk < 5 ) fprintf(stderr,"%c",mrk[kk]) ;
           break ;
         }
       }
     } /* end of scan over parameter reflections */
   } /* end of initial scan; should have NKEEP best results in kpar & kval */

   /* 23 Feb 2010: check how many are actually good */

   for( ngood=kk=0 ; kk < NKEEP && kval[kk] < BIGVAL ; kk++,ngood++ ) ; /*nada*/
   if( ngood < 1 ){  /* should never happen */
     ERROR_message("Can't find any good starting locations!?") ;
     free((void *)wpar) ; free((void *)spar) ;
     for( kk=0 ; kk < NKEEP ; kk++ ) free((void *)kpar[kk]) ;
     EXRETURN ;
   }

   /* PRED01(x) is periodic reduction of x to inteval [0,1] */
   for( kk=0 ; kk < ngood ; kk++ ){  /* make sure are in 0..1 range */
     for( ii=0 ; ii < nfr ; ii++ ) kpar[kk][ii] = PRED01(kpar[kk][ii]) ;
     idx[kk] = kk ;      /* index in inital search results [24 Jun 2021] */
   }

   if( mverb ){                    /* print table of results? */
     if( mpr > 0 ) fprintf(stderr,"\n") ;
     fprintf(stderr," + - best %d costs found:\n",ngood) ;
     for(kk=0;kk<ngood;kk++){
      fprintf(stderr,"   o=%2d v=% 9.6f:",kk,kval[kk]);
      for( ii=qq=0 ; qq < stup->wfunc_numpar ; qq++ ){
       if( !stup->wfunc_param[qq].fixed ){
        /* un-scale parameter from [0,1] back to its true range */
        val = stup->wfunc_param[qq].min+stup->wfunc_param[qq].siz*kpar[kk][ii];
        fprintf(stderr," % 6.2f",val) ; ii++ ;
       }
      }
      fprintf(stderr,"  [%s]" , rval[kk] ? "rand" : "grid" ) ;
      fprintf(stderr,"\n") ;
     }
   }

   /* try a little optimization on each of these parameter sets */

   /** POTENTIAL CHANGE: free the fixed parameters before this step? **/

   vbest = BIGVAL ; jj = 0 ; if( icod != MRI_NN ) stup->interp_code = MRI_LINEAR ;
   if( mverb ) fprintf(stderr," + - A little optimization:") ;
   maxstep = 11*nfr+17 ;  /* cap on cost-function evaluations per set */
   for( kk=0 ; kk < ngood ; kk++ ){
     if( kval[kk] >= BIGVAL ) continue ;  /* should not happen */
     RAND_ROUND ;
     /* here is the "little optimization" of kpar[] */
     neval[kk] = powell_newuoa( nfr , kpar[kk] ,
                                0.05 , 0.001 , maxstep , GA_scalar_fitter ) ;
     /* cost functional at result */
     kval[kk]  = GA_scalar_fitter( nfr , kpar[kk] ) ;
     if( kval[kk] < vbest ){ vbest = kval[kk]; jj = kk; }  /* jj = "best" */
     if( mverb ) fprintf(stderr,".") ;
   }
   if( mverb ) fprintf(stderr,"\n") ;
   stup->vbest = vbest ;  /* save for user's edification */

   for( kk=0 ; kk < ngood ; kk++ )  /* make sure are in 0..1 range */
     for( ii=0 ; ii < nfr ; ii++ ) kpar[kk][ii] = PRED01(kpar[kk][ii]) ;

   /* 23 Feb 2010: cast out the bad overlap (coincidence count) guys */
   /* [I think that part of the code was excised???] */

   /* copy into qval */

   for( kk=0 ; kk < ngood ; kk++ ) qval[kk] = kval[kk] ;

   /* at this point, smallest error is vbest and best index in kpar is jj */

   if( mverb ){                    /* print out optimized results? */
     fprintf(stderr," + - costs of the above after a little optimization:\n") ;
     for( kk=0 ; kk < ngood ; kk++ ){
      fprintf(stderr,"  %co=%2d v=% 9.6f:",(kk==jj)?'*':' ',kk,qval[kk]);
      for( ii=qq=0 ; qq < stup->wfunc_numpar ; qq++ ){
       if( !stup->wfunc_param[qq].fixed ){
        val = stup->wfunc_param[qq].min+stup->wfunc_param[qq].siz*kpar[kk][ii];
        fprintf(stderr," % 6.2f",val) ; ii++ ;
       }
      }
      fprintf(stderr,"  [%s]" , rval[kk] ? "rand" : "grid" ) ;
      fprintf(stderr,"  [f=%d]" , neval[kk] ) ; /* 01 Jun 2021 */
      fprintf(stderr,"\n") ;
     }
   }

   /* save best result in the stup (setup) parameter structure */

   bpar = kpar[jj] ;
   for( ii=qq=0 ; qq < stup->wfunc_numpar ; qq++ ){
     if( !stup->wfunc_param[qq].fixed ){
       stup->wfunc_param[qq].val_init = stup->wfunc_param[qq].min
                                       +stup->wfunc_param[qq].siz*bpar[ii];
       ii++ ;
     }
     stup->wfunc_param[qq].val_out = stup->wfunc_param[qq].val_init ;
   }

   /* sort kval, then store a bunch of the best parameters,
      which are not too close to their betters (in cost)   */

#undef  DTHRESH
#define DTHRESH 0.05   /* min L-infinity distance between saved trial sets */

   /* copy into fval, with index in orig list in ival */

   for( ii=0 ; ii < ngood ; ii++ ){ fval[ii] = kval[ii]; ival[ii] = ii; }

   qsort_floatint( ngood , fval , ival ) ; /* sort on fval, carrying ival along */

   for( qq=0 ; qq < stup->wfunc_numpar ; qq++ ){ /** save best into trial #0 **/
     if( !stup->wfunc_param[qq].fixed )
       stup->wfunc_param[qq].val_trial[0] = stup->wfunc_param[qq].val_init ;
     else
       stup->wfunc_param[qq].val_trial[0] = stup->wfunc_param[qq].val_fixed ;
     stup->wfunc_param[qq].idx_trial[0] = ival[0] ;
   }

   if( mverb > 1 ) ININFO_message("- saving #%2d for use with twobest",ival[0]) ;

   nt = 1 ;
   for( jj=1 ; jj < ngood && nt < PARAM_MAXTRIAL ; jj++ ){
     qpar = kpar[ival[jj]] ;                 /* the jj-th best param set */
     for( kk=0 ; kk < jj ; kk++ ){   /* loop over the previous best ones */
       cpar =  kpar[ival[kk]] ;
       for( dist=0.0,ii=0 ; ii < nfr ; ii++ ){ /* compute dist from previous best [jj] */
         val = fabs(qpar[ii]-cpar[ii]) ; dist = MAX(dist,val) ;
       }
       if( dist < DTHRESH ){  /* too close to cpar ==> skip */
         if( mverb > 1 )
           ININFO_message("- skip #%2d for twobest: too close to set #%2d",
                           ival[jj], ival[kk] ) ;
         goto NEXT_jj ;
       }
     }
     if( mverb > 1 ) ININFO_message("- saving #%2d for use with twobest",ival[jj]) ;
     for( ii=qq=0 ; qq < stup->wfunc_numpar ; qq++ ){
       if( !stup->wfunc_param[qq].fixed ){
         stup->wfunc_param[qq].val_trial[nt] = stup->wfunc_param[qq].min
                                              +stup->wfunc_param[qq].siz
                                               *qpar[ii];
         ii++ ;
       } else {
         stup->wfunc_param[qq].val_trial[nt] = stup->wfunc_param[qq].val_fixed ;
       }
       stup->wfunc_param[qq].idx_trial[nt] = ival[jj] ;
     }
     nt++ ; /* 1 more trial set saved */
   NEXT_jj: ; /* end of loop over jj = list of trial parameter sets */
   }
   stup->wfunc_ntrial = nt ;

   /*** cleanup and exeunt ***/

   free((void *)wpar) ; free((void *)spar) ;
   for( kk=0 ; kk < NKEEP ; kk++ ) free((void *)kpar[kk]) ;

   stup->interp_code = icod ; EXRETURN ;  /* restore caller's interp mode */
}
1870 
1871 /*---------------------------------------------------------------------------*/
1872 /*! Warp the entire target image to base coords.  Will be in float format.
1873 *//*-------------------------------------------------------------------------*/
1874 
mri_genalign_scalar_warpim(GA_setup * stup)1875 MRI_IMAGE * mri_genalign_scalar_warpim( GA_setup *stup )
1876 {
1877    MRI_IMAGE *wim ;
1878    float     *war ;
1879    float      oot ;
1880 
1881 ENTRY("mri_genalign_scalar_warpim") ;
1882 
1883    if( stup       == NULL || stup->setup != SMAGIC ||
1884        stup->ajim == NULL || stup->bsim  == NULL     ){
1885      ERROR_message("Illegal call to mri_genalign_scalar_warpim()") ;
1886      RETURN(NULL) ;
1887    }
1888    gstup = stup ; gstup_bk = stup ;
1889 
1890    wim = mri_new_conforming( stup->bsim , MRI_float ) ;
1891    war = MRI_FLOAT_PTR(wim) ;
1892 
1893    oot = GA_get_outval() ; GA_set_outval(0.0f) ;
1894    GA_get_warped_values( 0 , NULL , war ) ;
1895    GA_set_outval(oot) ;
1896 
1897    RETURN(wim) ;
1898 }
1899 
1900 /*-------------------------------------------------------------------------*/
1901 /*! Warp an image to base coords, on an nnx X nny X nnz grid.
1902      - The mapping between ijk and base xyz coords, and
1903        the mapping between target xyz and ijk coords must have
1904        been set in mri_genalign_affine_set_befafter() before calling this!
1905      - The warping between base xyz and target xyz is given by the
1906        wfunc, which has npar parameters stored in wpar.
1907      - The interpolation method is in icode.
1908      - Output is in float format, no matter what input data format was.
1909      - Generalized from GA_get_warped_values() -- RWCox - 26 Sep 2006.
1910 *//*-----------------------------------------------------------------------*/
1911 
MRI_IMAGE * mri_genalign_scalar_warpone( int npar, float *wpar, GA_warpfunc *wfunc,
                                         MRI_IMAGE *imtarg ,
                                         int nnx , int nny , int nnz , int icode )
{
   int   ii,jj,kk,qq,pp,npp,mm,nx,ny,nxy,nz , npt,nall , nper ;
   float *imf , *jmf , *kmf ;   /* base ijk coords for one batch of points */
   float *imw , *jmw , *kmw ;   /* the same points after warping */
   MRI_IMAGE *wim , *inim ;
   float     *war , *inar ;
   float oot ;                  /* saved out-of-range fill value */
   static int recur=0 ;         /* inside the vector-image recursion? */

ENTRY("mri_genalign_scalar_warpone") ;

   if( wfunc == NULL || imtarg == NULL ) RETURN(NULL) ;

   /** allow for 'vector' images (for example, RGB) [12 May 2020]       **/
   /** see mrilib.h for use of recursion via CALLME and VECTORME macros **/

#undef  CALLME
#define CALLME(inp,out) (out) = mri_genalign_scalar_warpone(                  \
                                 npar,wpar,wfunc, (inp) , nnx,nny,nnz,icode )
   if( ISVECTIM(imtarg) ){
     recur=1 ; VECTORME(imtarg,wim) ; recur=0 ; RETURN(wim) ;
   }

   /** OK, the normal case **/

   nper = MAX(nperval,NPER) ;  /* batch size for processing points */

   /* send parameters to warping function, for setup */

   if( mverb > 1 && !recur ){
     fprintf(stderr,"++ image warp: parameters =") ;
     for( ii=0 ; ii < npar ; ii++ ) fprintf(stderr," %.4f",wpar[ii]) ;
     fprintf(stderr,"\n") ;
#if 0
     if( AFNI_yesenv("ALLIN_DEBUG") ){
       if( aff_use_before ) DUMP_MAT44("aff_before",aff_before) ;
       if( aff_use_after  ) DUMP_MAT44("aff_after ",aff_after ) ;
     }
#endif
   }

   RAND_ROUND ;
   wfunc( npar , wpar , 0,NULL,NULL,NULL , NULL,NULL,NULL ) ;

   /* create float copy of input image, if needed */

   if( imtarg->kind == MRI_float ) inim = imtarg ;
   else                            inim = mri_to_float(imtarg) ;
   inar = MRI_FLOAT_PTR(inim) ;

   /* dimensions of output image */

   nx = nnx ; ny = nny ; nz = nnz ; nxy = nx*ny ; npt = nxy * nz ;
   wim = mri_new_vol( nx,ny,nz , MRI_float ) ;
   war = MRI_FLOAT_PTR(wim) ;

   /* ijk coordinates in base image to be warped to target ijk */

   nall = MIN(nper,npt) ;

   imf = (float *)calloc(sizeof(float),nall) ;
   jmf = (float *)calloc(sizeof(float),nall) ;
   kmf = (float *)calloc(sizeof(float),nall) ;

   /* ijk coordinates after warping */

   imw = (float *)calloc(sizeof(float),nall) ;
   jmw = (float *)calloc(sizeof(float),nall) ;
   kmw = (float *)calloc(sizeof(float),nall) ;

   /* make out-of-range interpolations come back as 0 */

   oot = GA_get_outval() ; GA_set_outval(0.0f) ;

   /*--- do (up to) nall points at a time ---*/

   for( pp=0 ; pp < npt ; pp+=nall ){
     npp = MIN( nall , npt-pp ) ;      /* number to do */

     /* get base ijk coords (unravel linear index mm into i,j,k) */

     for( qq=0 ; qq < npp ; qq++ ){
       mm = pp+qq ;
       ii = mm % nx; kk = mm / nxy; jj = (mm-kk*nxy) / nx;
       imf[qq] = (float)ii; jmf[qq] = (float)jj; kmf[qq] = (float)kk;
     }

     /**** warp base points to new locations ****/

     RAND_ROUND ;
     wfunc( npar , NULL , npp  , imf,jmf,kmf , imw,jmw,kmw ) ;

     /* interpolate target image at warped points */

     switch( icode ){
       case MRI_NN:
         GA_interp_NN( inim , npp , imw,jmw,kmw , war+pp ) ;
       break ;

       case MRI_LINEAR:
         GA_interp_linear( inim , npp , imw,jmw,kmw , war+pp ) ;
       break ;

       case MRI_CUBIC:
         GA_interp_cubic( inim , npp , imw,jmw,kmw , war+pp ) ;
       break ;

       case MRI_VARP1:
         GA_interp_varp1( inim , npp , imw,jmw,kmw , war+pp ) ;
       break ;

       case MRI_WSINC5:
         GA_interp_wsinc5( inim , npp , imw,jmw,kmw , war+pp ) ;
       break ;

       default:        /* for higher order methods not implemented here */
       case MRI_QUINTIC:
         GA_interp_quintic( inim , npp , imw,jmw,kmw , war+pp ) ;
       break ;
     }
   }

   GA_set_outval(oot) ;  /* restore caller's out-of-range value */

   /* clip interpolated values to range of target image, if need be
      (higher-order interpolants can overshoot the input data range) */

   if( MRI_HIGHORDER(icode) ){
     float bb=inar[0] , tt=inar[0] ; int nin=inim->nvox ;
     for( pp=1 ; pp < nin ; pp++ ) if( inar[pp] < bb ) bb = inar[pp] ;
                              else if( inar[pp] > tt ) tt = inar[pp] ;
     for( pp=0 ; pp < npt ; pp++ ) if( war[pp]  < bb ) war[pp] = bb ;
                              else if( war[pp]  > tt ) war[pp] = tt ;
   }

   /* free the enslaved memory */

   free((void *)kmw); free((void *)jmw); free((void *)imw);
   free((void *)kmf); free((void *)jmf); free((void *)imf);
   if( inim != imtarg ) mri_free(inim) ;

   RETURN(wim) ;
}
2056 
2057 #ifdef ALLOW_NWARP /*********************************************************/
2058 #ifndef HAVE_HEXVOL
2059 #define HAVE_HEXVOL
2060 /*----------------------------------------------------------------------------*/
2061 /* Volume of a hexahedron (distorted cube) given by 8 corners.
2062    Looking down from the top, the bottom plane points are numbered so:
2063        2 -- 3
2064        |    |  and the top plane is similar (add 4 to each index),
2065        0 -- 1  with point #(i+4) 'above' point #i.
2066 *//*..........................................................................*/
2067 
2068 #undef  TRIPROD
2069 #define TRIPROD(ax,ay,az,bx,by,bz,cx,cy,cz) ( (ax)*((by)*(cz)-(bz)*(cy)) \
2070                                              +(bx)*((cy)*(az)-(cz)*(ay)) \
2071                                              +(cx)*((ay)*(bz)-(az)*(by))  )
2072 #undef  DA
2073 #undef  DB
2074 #undef  DC
2075 #define DA(p,q) (p.a-q.a)
2076 #define DB(p,q) (p.b-q.b)
2077 #define DC(p,q) (p.c-q.c)
2078 
/* Computes the volume as (1/12) times the sum of three triple products of
   edge-difference vectors (DA/DB/DC extract the .a/.b/.c components of a
   corner difference; TRIPROD is a 3x3 determinant).  NOTE(review): this
   appears to be the standard exact-volume formula for a hexahedron
   (cf. Grandy, LLNL) -- confirm before relying on that attribution.
   The three partial sums are accumulated in a fixed order; do not reorder
   them, since float addition is not associative. */
static float hexahedron_volume( float_triple x0 , float_triple x1 ,
                                float_triple x2 , float_triple x3 ,
                                float_triple x4 , float_triple x5 ,
                                float_triple x6 , float_triple x7  )
{
   float xa,ya,za , xb,yb,zb , xc,yc,zc , vol ;

   xa = DA(x7,x1)+DA(x6,x0); ya = DB(x7,x1)+DB(x6,x0); za = DC(x7,x1)+DC(x6,x0);
   xb = DA(x7,x2)          ; yb = DB(x7,x2)          ; zb = DC(x7,x2) ;
   xc = DA(x3,x0)          ; yc = DB(x3,x0)          ; zc = DC(x3,x0) ;
   vol = TRIPROD(xa,ya,za,xb,yb,zb,xc,yc,zc) ;
   xa = DA(x6,x0)          ; ya = DB(x6,x0)          ; za = DC(x6,x0) ;
   xb = DA(x7,x2)+DA(x5,x0); yb = DB(x7,x2)+DB(x5,x0); zb = DC(x7,x2)+DC(x5,x0);
   xc = DA(x7,x4)          ; yc = DB(x7,x4)          ; zc = DC(x7,x4) ;
   vol += TRIPROD(xa,ya,za,xb,yb,zb,xc,yc,zc) ;
   xa = DA(x7,x1)          ; ya = DB(x7,x1)          ; za = DC(x7,x1) ;
   xb = DA(x5,x0)          ; yb = DB(x5,x0)          ; zb = DC(x5,x0) ;
   xc = DA(x7,x4)+DA(x3,x0); yc = DB(x7,x4)+DB(x3,x0); zc = DC(x7,x4)+DC(x3,x0);
   vol += TRIPROD(xa,ya,za,xb,yb,zb,xc,yc,zc) ;
   return (0.08333333f*vol) ;  /* 0.08333333f ~= 1/12 */
}
2100 #undef TRIPROD
2101 #undef DA
2102 #undef DB
2103 #undef DC
2104 #endif /* HAVE_HEXVOL */
2105 
2106 /*----------------------------------------------------------------------------*/
2107 
2108 #undef IJK
2109 #undef C2F
2110 #undef D2F
2111 
2112 #define IJK(p,q,r)    ((p)+(q)*nx+(r)*nxy)
2113 #define C2F(p,q,r,xx) MAT44_VEC(cmat,(p),(q),(r),(xx).a,(xx).b,(xx).c)
2114 #define D2F(pqr,xx)   ( (xx).a+=dxar[pqr], (xx).b+=dyar[pqr], (xx).c+=dzar[pqr] )
2115 
/* Build a float image of per-voxel hexahedron volumes from the 3
   displacement images in dxyzar (dx, dy, dz).  cmat maps ijk indices to
   coordinates; an invalid cmat is replaced by the identity. */
MRI_IMAGE * mri_genalign_xyzwarp_volmap( MRI_IMARR *dxyzar , mat44 cmat )
{
   int nx,ny,nz,nxy,nxyz ;
   float *dxar, *dyar, *dzar, *var ; MRI_IMAGE *vim ;

ENTRY("mri_genalign_xyzwarp_volmap") ;

   if( dxyzar == NULL || IMARR_COUNT(dxyzar) < 3 ) RETURN(NULL) ;

   nx = IMARR_SUBIM(dxyzar,0)->nx ;
   ny = IMARR_SUBIM(dxyzar,0)->ny ;
   nz = IMARR_SUBIM(dxyzar,0)->nz ; nxy = nx*ny ; nxyz = nxy*nz ;

   /* must have at least 2 dimensions bigger than 1 */

   if( nxyz <= nx || nxyz <= ny || nxyz <= nz ) RETURN(NULL) ;

   dxar = MRI_FLOAT_PTR(IMARR_SUBIM(dxyzar,0)) ;
   dyar = MRI_FLOAT_PTR(IMARR_SUBIM(dxyzar,1)) ;
   dzar = MRI_FLOAT_PTR(IMARR_SUBIM(dxyzar,2)) ;

   vim = mri_new_conforming( IMARR_SUBIM(dxyzar,0) , MRI_float ) ;
   var = MRI_FLOAT_PTR(vim) ;

   if( !ISVALID_MAT44(cmat) ) LOAD_DIAG_MAT44(cmat,1,1,1) ;

 AFNI_OMP_START ;
#pragma omp parallel if( nxyz > 33333 )
 { float_triple x0,x1,x2,x3,x4,x5,x6,x7 ;
   int ii,jj,kk , ip,jp,kp , ijk , qq ;
#pragma omp for
   for( qq=0 ; qq < nxyz ; qq++ ){
     /* unravel linear index qq into (ii,jj,kk), then corner (ip,jp,kp) */
     ii = qq % nx ; kk = qq / nxy ; jj = (qq-kk*nxy) / nx ;
     ip = ii+1 ; jp = jj+1 ; kp = kk+1 ;
     /* C2F: corner ijk -> xyz coords via cmat (uses UNclamped ip/jp/kp) */
     C2F(ii,jj,kk,x0); C2F(ip,jj,kk,x1); C2F(ii,jp,kk,x2); C2F(ip,jp,kk,x3);
     C2F(ii,jj,kp,x4); C2F(ip,jj,kp,x5); C2F(ii,jp,kp,x6); C2F(ip,jp,kp,x7);
     /* clamp indices at the grid edge BEFORE the displacement lookups,
        so edge voxels reuse the last displacement value */
     if( ip == nx ) ip-- ; if( jp == ny ) jp-- ; if( kp == nz ) kp-- ;
     /* D2F: add the displacement at linear index ijk to each corner */
     ijk = IJK(ip,jj,kk) ; D2F(ijk,x1) ;
     ijk = IJK(ii,jp,kk) ; D2F(ijk,x2) ;
     ijk = IJK(ip,jp,kk) ; D2F(ijk,x3) ;
     ijk = IJK(ii,jj,kp) ; D2F(ijk,x4) ;
     ijk = IJK(ip,jj,kp) ; D2F(ijk,x5) ;
     ijk = IJK(ii,jp,kp) ; D2F(ijk,x6) ;
     ijk = IJK(ip,jp,kp) ; D2F(ijk,x7) ;
     ijk = qq            ; D2F(ijk,x0) ;
     var[qq] = hexahedron_volume(x0,x1,x2,x3,x4,x5,x6,x7) ;
   }
 }
 AFNI_OMP_END ;

   RETURN(vim) ;
}
2168 #undef IJK
2169 #undef C2F
2170 #undef D2F
2171 
2172 /*----------------------------------------------------------------------------*/
2173 /* Get the 3 axes (xyz) deltas at each point in the input grid.
2174    That is, for each grid point (x,y,z), the source dataset should be
2175    evaluated at (x+dx,y+dy,z+dz) to make it match the base dataset.
2176 *//*--------------------------------------------------------------------------*/
2177 
mri_genalign_scalar_xyzwarp(int npar,float * wpar,GA_warpfunc * wfunc,int nnx,int nny,int nnz)2178 MRI_IMARR * mri_genalign_scalar_xyzwarp( int npar, float *wpar,
2179                                          GA_warpfunc *wfunc,
2180                                          int nnx , int nny , int nnz )
2181 {
2182    MRI_IMAGE *xim , *yim , *zim ;
2183    float     *xar , *yar , *zar ;
2184    MRI_IMARR *imar ;
2185    int   qq,pp,npp,nx,ny,nxy,nz , npt,nall , nper , ab,aa ;
2186    float x,y,z , *xbb , *ybb , *zbb ;
2187    mat44 cmat ;
2188 
2189 ENTRY("mri_genalign_scalar_xyzwarp") ;
2190 
2191    if( wfunc == NULL ) RETURN(NULL) ;
2192 
2193    /* send parameters to warping function, for setup */
2194 
2195    wfunc( npar , wpar , 0,NULL,NULL,NULL , NULL,NULL,NULL ) ;
2196 
2197    /* dimensions of output images */
2198 
2199    nx = nnx ; ny = nny ; nz = nnz ; nxy = nx*ny ; npt = nxy * nz ;
2200    xim = mri_new_vol( nx,ny,nz , MRI_float ) ; xar = MRI_FLOAT_PTR(xim) ;
2201    yim = mri_new_vol( nx,ny,nz , MRI_float ) ; yar = MRI_FLOAT_PTR(yim) ;
2202    zim = mri_new_vol( nx,ny,nz , MRI_float ) ; zar = MRI_FLOAT_PTR(zim) ;
2203    INIT_IMARR(imar) ;
2204    ADDTO_IMARR(imar,xim) ; ADDTO_IMARR(imar,yim) ; ADDTO_IMARR(imar,zim) ;
2205 
2206    /* ijk coordinates in base image to be warped to target xyz */
2207 
2208    nper = MAX(nperval,NPER) ; nall = MIN(nper,npt) ;
2209    xbb  = (float *)calloc(sizeof(float),nall) ;
2210    ybb  = (float *)calloc(sizeof(float),nall) ;
2211    zbb  = (float *)calloc(sizeof(float),nall) ;
2212 
2213    /* set wfunc to do xyz -> xyz transform, not ijk -> ijk */
2214 
2215    ab = aff_use_before ; aa = aff_use_after ;
2216    mri_genalign_affine_use_befafter( 0 , 0 ) ;
2217 
2218    /*--- do (up to) nall points at a time ---*/
2219 
2220    for( pp=0 ; pp < npt ; pp+=nall ){
2221      npp = MIN( nall , npt-pp ) ;      /* number to do in this group */
2222 
2223      /* get base (unwarped) xyz coords into xbb,ybb,zbb */
2224 
2225  AFNI_OMP_START ;
2226 #pragma omp parallel if( npt > 33333 )
2227  { int qq , mm , ii,jj,kk ;
2228 #pragma omp for
2229      for( qq=0 ; qq < npp ; qq++ ){
2230        mm = pp+qq ;
2231        ii = mm % nx ; kk = mm / nxy ; jj = (mm-kk*nxy) / nx ;
2232        if( ab ){
2233          MAT44_VEC( aff_before , ii,jj,kk , xbb[qq],ybb[qq],zbb[qq] ) ;
2234        } else {
2235          xbb[qq] = ii ; ybb[qq] = jj ; zbb[qq] = kk ;
2236        }
2237      }
2238  }
2239  AFNI_OMP_END ;
2240 
2241      /**** warp base points to new locations, in xar,yar,zar ****/
2242 
2243      wfunc( npar , NULL , npp  , xbb,ybb,zbb , xar+pp,yar+pp,zar+pp ) ;
2244 
2245      /* convert result to shifts rather than absolute coordinates,
2246         store back into xar,yar,zar (output arrays inside output images) */
2247 
2248      for( qq=0 ; qq < npp ; qq++ ){
2249        xar[pp+qq] -= xbb[qq] ;
2250        yar[pp+qq] -= ybb[qq] ;
2251        zar[pp+qq] -= zbb[qq] ;
2252      }
2253 
2254    } /* end of loop over groups of points */
2255 
2256    free(zbb) ; free(ybb) ; free(xbb) ;            /* tossola trashola */
2257    mri_genalign_affine_use_befafter( ab , aa ) ;  /* status quo ante */
2258 
2259    /* 13 Dec 2010: save the cell volumes as well */
2260 
2261    if( ab ) cmat = aff_before ;
2262    else     INVALIDATE_MAT44(cmat) ;
2263    zim = mri_genalign_xyzwarp_volmap( imar , cmat ) ;
2264    if( zim != NULL ) ADDTO_IMARR(imar,zim) ;
2265    RETURN(imar) ;
2266 }
2267 #endif /* ALLOW_NWARP */ /**************************************************/
2268 
2269 /*--------------------------------------------------------------------------*/
2270 /*! - Find the 8 corners of the input dataset (voxel edges, not centers).
2271     - Warp each one using the provided wfunc().
2272     - Return the min and max (x,y,z) coordinates of these warped points.
2273 *//*------------------------------------------------------------------------*/
2274 
2275 #if 0
/* NOTE(review): this function is compiled out via the surrounding #if 0.
   It was never finished -- the final statement calls wfunc() with no
   arguments, which would not compile if enabled.  Intended purpose (per
   the comment block above): warp the 8 corners of inset's voxel grid via
   wfunc() and return the bounding box of the results in *xb..*zt. */

static void mri_genalign_warped_bbox( THD_3dim_dataset *inset,
                                      int npar, float *wpar, GA_warpfunc *wfunc,
                                      float *xb , float *xt ,
                                      float *yb , float *yt ,
                                      float *zb , float *zt )
{
   THD_dataxes *daxes ;
   float nx0,ny0,nz0 , nx1,ny1,nz1 ;
   float xx,yy,zz , xbot,ybot,zbot , xtop,ytop,ztop ;

   /* setup stuff */

   if( !ISVALID_DSET(inset) || wfunc == NULL ) return ;  /* nothing to do */

   daxes = inset->daxes ;
   /* voxel-edge (not center) index bounds: -0.5 .. n-0.5 in each axis */
   nx0 =           -0.5 ; ny0 =           -0.5 ; nz0 =           -0.5 ;
   nx1 = daxes->nxx-0.5 ; ny1 = daxes->nyy-0.5 ; nz1 = daxes->nzz-0.5 ;

   /* send parameters to warp function, if needed */

   if( npar > 0 && wpar != NULL )
     wfunc( npar , wpar , 0,NULL,NULL,NULL , NULL,NULL,NULL ) ;

   wfunc() ;  /* NOTE(review): incomplete placeholder -- see header note */
}
2301 #endif
2302 
2303 /*==========================================================================*/
2304 /*****------------------------ Warping functions -----------------------*****/
2305 /*****--- Functions that define the spatial transform from parameters --*****/
2306 /*--------------------------------------------------------------------------*/
2307 
2308 #include "vecmat.h"
2309 
2310 /****************************************************************************/
2311 /**************************** General affine warp ***************************/
2312 
2313 /***** 04 Oct 2006: Modify to use mat44 instead of vecmat stuff, mostly *****/
2314 
2315 /*--------------------------------------------------------------------------*/
2316 
2317 #define D2R (PI/180.0)                /* angles are in degrees */
2318 
2319 static int matorder = MATORDER_SDU ;  /* cf. mrilib.h */
2320 static int smat     = SMAT_LOWER ;
2321 static int dcode    = DELTA_AFTER  ;  /* cf. 3ddata.h */
2322 
mri_genalign_affine_setup(int mmmm,int dddd,int ssss)2323 void mri_genalign_affine_setup( int mmmm , int dddd , int ssss )
2324 {
2325    if( mmmm > 0 ) matorder = mmmm ;
2326    if( dddd > 0 ) dcode    = dddd ;
2327    if( ssss > 0 ) smat     = ssss ;
2328    return ;
2329 }
2330 
2331 /*--------------------------------------------------------------------------*/
2332 
/*! Install the 'before' and 'after' matrices that bracket the affine
    warp.  A NULL pointer or an invalid matrix disables that side. */

void mri_genalign_affine_set_befafter( mat44 *ab , mat44 *af )
{
   int bok = ( ab != NULL && ISVALID_MAT44(*ab) ) ;
   int aok = ( af != NULL && ISVALID_MAT44(*af) ) ;

   aff_use_before = bok ;
   if( bok ) aff_before = *ab ;
   else      INVALIDATE_MAT44(aff_before) ;

   aff_use_after = aok ;
   if( aok ) aff_after = *af ;
   else      INVALIDATE_MAT44(aff_after) ;

   return ;
}
2348 
2349 /*--------------------------------------------------------------------------*/
2350 
/*! Fetch the current 'before' and/or 'after' matrices.
    Either pointer may be NULL to skip that output. */

void mri_genalign_affine_get_befafter( mat44 *ab , mat44 *af )
{
   if( ab != NULL ){ *ab = aff_before ; }
   if( af != NULL ){ *af = aff_after  ; }
   return ;
}
2356 
2357 /*--------------------------------------------------------------------------*/
2358 
/*! Fetch the most recently computed index-to-index (ijk) affine matrix. */

void mri_genalign_affine_get_gammaijk( mat44 *gg )
{
  if( gg == NULL ) return ;
  *gg = aff_gamijk ;
}
2363 
2364 /*--------------------------------------------------------------------------*/
2365 
/*! Fetch the most recently computed coordinate-to-coordinate (xyz)
    affine matrix (before the before/after matrices are applied). */

void mri_genalign_affine_get_gammaxyz( mat44 *gg )
{
  if( gg == NULL ) return ;
  *gg = aff_gamxyz ;
}
2370 
2371 /*--------------------------------------------------------------------------*/
2372 
mri_genalign_affine_use_befafter(int bb,int aa)2373 void mri_genalign_affine_use_befafter( int bb , int aa )
2374 {
2375    aff_use_before = ISVALID_MAT44(aff_before) && bb ;
2376    aff_use_after  = ISVALID_MAT44(aff_after)  && aa ;
2377 }
2378 
2379 /*--------------------------------------------------------------------------*/
2380 /*! Compute a rotation matrix specified by 3 angles:
2381       Q = R3 R2 R1, where Ri is rotation about axis axi by angle thi.
2382 *//*------------------------------------------------------------------------*/
2383 
rot_matrix(int ax1,double th1,int ax2,double th2,int ax3,double th3)2384 static mat44 rot_matrix( int ax1, double th1,
2385                          int ax2, double th2, int ax3, double th3  )
2386 {
2387    mat44 q , p ;
2388 
2389    LOAD_ROT_MAT44( q , th1 , ax1 ) ;
2390    LOAD_ROT_MAT44( p , th2 , ax2 ) ; q = MAT44_MUL( p , q ) ;
2391    LOAD_ROT_MAT44( p , th3 , ax3 ) ; q = MAT44_MUL( p , q ) ;
2392 
2393    return q ;
2394 }
2395 
2396 /*--------------------------------------------------------------------------*/
2397 /* Net rotation given 3 perpendicular angles [02 Jan 2019] */
2398 
total_rotation_degrees(float ax,float ay,float az)2399 float total_rotation_degrees( float ax, float ay, float az )
2400 {
2401    mat44 rr ; double cth , th ;
2402 
2403    rr  = rot_matrix( 0,D2R*ax , 1,D2R*ay , 2,D2R*az ) ;
2404    cth = 0.5 * sqrt( 1.0 + rr.m[0][0] + rr.m[1][1] + rr.m[2][2] ) ;
2405    th  = 360.0 / PI * acos(cth) ;
2406    return (float)th ;
2407 }
2408 
2409 /*--------------------------------------------------------------------------*/
static int pgmat=0 ;  /* nonzero ==> debug-print matrices as they are built */

/*! Turn matrix debug printing on (p != 0) or off (p == 0). */

void mri_genalign_set_pgmat( int p ){ pgmat = p ; }
2412 
2413 /*--------------------------------------------------------------------------*/
2414 
/*! Build the general affine transformation matrix from up to 12 parameters:
      parvec[0..2]  = shifts (applied per the global dcode setting)
      parvec[3..5]  = rotation angles in degrees
      parvec[6..8]  = scale factors (accepted only if in 0.10 .. 10.0)
      parvec[9..11] = shears (accepted only if |value| <= 0.3333)
    Missing trailing parameters default to the identity.  The product
    order of shear/scale/rotation is set by the global matorder; the
    shear matrix shape by the global smat.  Side effects: stores the
    coordinate-space matrix in aff_gamxyz and the final matrix (with
    the before/after matrices folded in, if enabled) in aff_gamijk.
    Returns that final matrix. */

mat44 GA_setup_affine( int npar , float *parvec )
{
   mat44 ss,dd,uu,aa,bb , gam ;
   THD_fvec3 vv ;
   float     a,b,c , p,q,r ;

   if( pgmat ){  /* optional debug printout of incoming parameters */
     int ii ;
     printf("GA_setup_affine params:") ;
     for( ii=0 ; ii < npar ; ii++ ) printf(" %g",parvec[ii]) ;
     printf("\n") ;
   }

   /* uu = rotation */

   a = b = c = 0.0f ;
   if( npar >= 4 ) a = D2R*parvec[3] ;  /* angles converted to radians */
   if( npar >= 5 ) b = D2R*parvec[4] ;
   if( npar >= 6 ) c = D2R*parvec[5] ;
   if( a != 0.0f || b != 0.0f || c != 0.0f )
     uu = rot_matrix( 2,a , 0,b , 1,c ) ;
   else
     LOAD_DIAG_MAT44( uu , 1.0f,1.0f,1.0f ) ;  /* no rotation ==> identity */

   if( pgmat ) DUMP_MAT44("GA_setup_affine uu",uu) ;

   /* dd = scaling -- out-of-range scale factors are silently reset to 1 */

   a = b = c = 1.0f ;
   if( npar >= 7 ){ a = parvec[6]; if( a <= 0.10f || a >= 10.0f ) a = 1.0f; }
   if( npar >= 8 ){ b = parvec[7]; if( b <= 0.10f || b >= 10.0f ) b = 1.0f; }
   if( npar >= 9 ){ c = parvec[8]; if( c <= 0.10f || c >= 10.0f ) c = 1.0f; }
   LOAD_DIAG_MAT44( dd , a,b,c ) ;

   if( pgmat ) DUMP_MAT44("GA_setup_affine dd",dd) ;

   /* ss = shear -- out-of-range shears are silently reset to 0 */

   a = b = c = 0.0f ;
   if( npar >= 10 ){ a = parvec[ 9]; if( MYfabsf(a) > 0.3333f ) a = 0.0f; }
   if( npar >= 11 ){ b = parvec[10]; if( MYfabsf(b) > 0.3333f ) b = 0.0f; }
   if( npar >= 12 ){ c = parvec[11]; if( MYfabsf(c) > 0.3333f ) c = 0.0f; }
#if 1
   switch( smat ){  /* shear matrix shape chosen by global smat setting */
     default:
     case SMAT_LOWER: LOAD_MAT44( ss , 1.0 , 0.0 , 0.0 , 0.0,
                                        a  , 1.0 , 0.0 , 0.0,
                                        b  ,  c  , 1.0 , 0.0 ) ; break ;

     case SMAT_UPPER: LOAD_MAT44( ss , 1.0 ,  a  ,  b , 0.0 ,
                                       0.0 , 1.0 ,  c , 0.0 ,
                                       0.0 , 0.0 , 1.0, 0.0  ) ; break ;

     case SMAT_XXX:   LOAD_MAT44( ss , 1.0 ,  a  ,  b , 0.0 ,
                                       0.0 , 1.0 , 0.0, 0.0 ,
                                       0.0 , 0.0 , 1.0, 0.0  ) ; break ;

     case SMAT_YYY:   LOAD_MAT44( ss , 1.0 , 0.0 , 0.0, 0.0 ,
                                        a  , 1.0 ,  b , 0.0 ,
                                       0.0 , 0.0 , 1.0, 0.0  ) ; break ;

     case SMAT_ZZZ:   LOAD_MAT44( ss , 1.0 , 0.0 , 0.0, 0.0 ,
                                       0.0 , 1.0 , 0.0, 0.0 ,
                                        a  ,  b  , 1.0, 0.0  ) ; break ;
   }
#else
   /* disabled alternative: decode smat as a permutation of (0,1,2) */
   ksm = (smat % 3) ;           /* decode:  smat = ism*9 + jsm*3 + ksm    */
   ism = (smat / 9) ;           /* (ism,jsm,ksm) = permutation of (0,1,2) */
   jsm = (smat - 9*ism - ksm) ;
   LOAD_DIAG_MAT44( ss , 1.0 , 1.0 , 1.0 ) ;  /* identity */
   ss.m[jsm][ism] = a ;
   ss.m[ksm][ism] = b ;
   ss.m[ksm][jsm] = c ;
#endif

   if( pgmat ) DUMP_MAT44("GA_setup_affine ss",ss) ;

   /* multiply them, as ordered -- gam = (first)*(second)*(third) */

   switch( matorder ){
     default:
     case MATORDER_SDU:  aa = MAT44_MUL(ss,dd) ; bb = uu ; break ;
     case MATORDER_SUD:  aa = MAT44_MUL(ss,uu) ; bb = dd ; break ;
     case MATORDER_DSU:  aa = MAT44_MUL(dd,ss) ; bb = uu ; break ;
     case MATORDER_DUS:  aa = MAT44_MUL(dd,uu) ; bb = ss ; break ;
     case MATORDER_USD:  aa = MAT44_MUL(uu,ss) ; bb = dd ; break ;
     case MATORDER_UDS:  aa = MAT44_MUL(uu,dd) ; bb = ss ; break ;
   }
   gam = MAT44_MUL(aa,bb) ;

   /* shifts */

   a = b = c = 0.0f ;
   if( npar >= 1 ) a = parvec[0] ;
   if( npar >= 2 ) b = parvec[1] ;
   if( npar >= 3 ) c = parvec[2] ;

   if( dcode == DELTA_BEFORE ){
     /* shift applied before the matrix ==> transform the shift vector */
     MAT44_VEC( gam , a,b,c , p,q,r ) ;
     a = p ; b = q ; c = r ;
   }
   LOAD_MAT44_VEC( gam , a,b,c ) ;

   if( pgmat ) DUMP_MAT44("GA_setup_affine gam (xyz)",gam) ;

   /* before and after transformations? */

   aff_gamxyz = gam ;  /* save the pure coordinate-space matrix */

   if( pgmat && aff_use_before ) DUMP_MAT44("GA_setup_affine before",aff_before) ;
   if( pgmat && aff_use_after  ) DUMP_MAT44("GA_setup_affine after ",aff_after ) ;

   if( aff_use_before ) gam = MAT44_MUL( gam , aff_before ) ;
   if( aff_use_after  ) gam = MAT44_MUL( aff_after , gam  ) ;

   aff_gamijk = gam ;  /* save the full matrix (ijk form when bracketed) */

#if 0
   if( mverb > 1 ){
     if( aff_use_before ) DUMP_MAT44("before",aff_before) ;
     if( aff_use_after  ) DUMP_MAT44("after ",aff_after ) ;
                          DUMP_MAT44("gam   ",gam       ) ;
   }
#endif

   if( pgmat ) DUMP_MAT44("GA_setup_affine gam (ijk)",gam) ;

   return gam ;
}
2544 
2545 /*--------------------------------------------------------------------------*/
2546 /*! A wfunc function for affine transformations. */
2547 /*--------------------------------------------------------------------------*/
2548 
mri_genalign_affine(int npar,float * wpar,int npt,float * xi,float * yi,float * zi,float * xo,float * yo,float * zo)2549 void mri_genalign_affine( int npar, float *wpar ,
2550                           int npt , float *xi, float *yi, float *zi ,
2551                                     float *xo, float *yo, float *zo  )
2552 {
2553    static mat44 gam ;  /* saved general affine matrix */
2554    int ii ;
2555 
2556    /** new parameters ==> setup matrix */
2557 
2558    if( npar > 0 && wpar != NULL ){
2559      gam = GA_setup_affine( npar , wpar ) ;
2560      if( pgmat ) DUMP_MAT44("mri_genalign_affine",gam) ;
2561    }
2562 
2563    /* nothing to transform? */
2564 
2565    if( npt <= 0 || xi == NULL || xo == NULL ) return ;
2566 
2567    /* multiply matrix times input vectors */
2568 
2569  AFNI_OMP_START ;
2570 #pragma omp parallel if( npt > 33333 )
2571  { int ii ;
2572 #pragma omp for
2573    for( ii=0 ; ii < npt ; ii++ )
2574      MAT44_VEC( gam , xi[ii],yi[ii],zi[ii] , xo[ii],yo[ii],zo[ii] ) ;
2575  }
2576  AFNI_OMP_END ;
2577 
2578    return ;
2579 }
2580 
2581 /*--------------------------------------------------------------------------*/
2582 /*! Similar to mri_genalign_affine(), but the 12 parameters are the matrix
2583     directly, with no physical interpretations such as angles, etc.
2584   * That is, this is the index-to-index matrix, not the coord-to-coord
2585     matrix (befafter stuff doesn't apply here, unlike mri_genalign_affine).
2586 *//*------------------------------------------------------------------------*/
2587 
mri_genalign_mat44(int npar,float * wpar,int npt,float * xi,float * yi,float * zi,float * xo,float * yo,float * zo)2588 void mri_genalign_mat44( int npar, float *wpar,
2589                          int npt , float *xi, float *yi, float *zi ,
2590                                    float *xo, float *yo, float *zo  )
2591 {
2592    static mat44 gam ;  /* saved general affine matrix */
2593    int ii ;
2594 
2595    /** new parameters ==> setup matrix */
2596 
2597    if( npar >= 12 && wpar != NULL ){
2598      LOAD_MAT44_AR(gam,wpar) ;
2599      if( pgmat ) DUMP_MAT44("mri_genalign_mat44",gam) ;
2600    }
2601 
2602    /* nothing to transform? */
2603 
2604    if( npt <= 0 || xi == NULL || xo == NULL ) return ;
2605 
2606    /* multiply matrix times input vectors */
2607 
2608  AFNI_OMP_START ;
2609 #pragma omp parallel if( npt > 33333 )
2610  { int ii ;
2611    for( ii=0 ; ii < npt ; ii++ )
2612      MAT44_VEC( gam , xi[ii],yi[ii],zi[ii] , xo[ii],yo[ii],zo[ii] ) ;
2613  }
2614  AFNI_OMP_END ;
2615 
2616    return ;
2617 }
2618 
2619 #ifdef ALLOW_NWARP /*********************************************************/
2620 /*--------------------------------------------------------------------------*/
2621 /*! A wfunc function for bilinear warp alignment. */
2622 
/*! A wfunc function for bilinear warp alignment.
    The transform is  x_out = inv( I + D.(x-c) ) * (A x)  where
      A = 12-parameter affine matrix built from wpar[0..11],
      D = 3x3x3 tensor dd_for built from wpar[12..38] (scaled by dd_fac),
      c = center (xcen,ycen,zcen) from the 'fake' parameters wpar[39..41],
      dd_fac = wpar[42].
    Parameters are saved in statics when npar >= 43, so later calls with
    npar == 0 reuse the last setup.  ddiag records whether all the
    off-diagonal D components are zero, enabling a cheaper inverse. */

void mri_genalign_bilinear( int npar, float *wpar ,
                            int npt , float *xi, float *yi, float *zi ,
                                      float *xo, float *yo, float *zo  )
{
   static mat44 gam ;  /* saved general affine matrix */
   static float dd_for[3][3][3] , xcen,ycen,zcen,dd_fac ;
   static int ddiag=0 ;  /* is dd matrix diagonal? */

   /** new parameters ==> setup matrix */

   if( npar >= 43 && wpar != NULL ){  /* 39 'real' parameters, 4 'fake' ones */
     float dmag , emag ;

     xcen   = wpar[39] ;              /* the fake (non-varying) parameters */
     ycen   = wpar[40] ;
     zcen   = wpar[41] ;
     dd_fac = wpar[42] ;

     gam = GA_setup_affine( 12 , wpar ) ;  /* affine part from wpar[0..11] */

     /* unpack D; emag accumulates |diagonal| terms, dmag |off-diagonal| */

     dd_for[0][0][0] = wpar[12] * dd_fac ;  /* the real parameters */
     dd_for[0][0][1] = wpar[13] * dd_fac ;
     dd_for[0][0][2] = wpar[14] * dd_fac ;
     emag = MYfabsf(wpar[12])+MYfabsf(wpar[13])+MYfabsf(wpar[14]) ;

     dd_for[0][1][0] = wpar[15] * dd_fac ;
     dd_for[0][1][1] = wpar[16] * dd_fac ;
     dd_for[0][1][2] = wpar[17] * dd_fac ;
     ddiag = (wpar[15]==0.0f) && (wpar[16]==0.0f) && (wpar[17]==0.0f) ;
     dmag  = MYfabsf(wpar[15])+MYfabsf(wpar[16])+MYfabsf(wpar[17]) ;

     dd_for[0][2][0] = wpar[18] * dd_fac ;
     dd_for[0][2][1] = wpar[19] * dd_fac ;
     dd_for[0][2][2] = wpar[20] * dd_fac ;
     ddiag = ddiag && (wpar[18]==0.0f) && (wpar[19]==0.0f) && (wpar[20]==0.0f) ;
     dmag += MYfabsf(wpar[18])+MYfabsf(wpar[19])+MYfabsf(wpar[20]) ;

     dd_for[1][0][0] = wpar[21] * dd_fac ;
     dd_for[1][0][1] = wpar[22] * dd_fac ;
     dd_for[1][0][2] = wpar[23] * dd_fac ;
     ddiag = ddiag && (wpar[21]==0.0f) && (wpar[22]==0.0f) && (wpar[23]==0.0f) ;
     dmag += MYfabsf(wpar[21])+MYfabsf(wpar[22])+MYfabsf(wpar[23]) ;

     dd_for[1][1][0] = wpar[24] * dd_fac ;
     dd_for[1][1][1] = wpar[25] * dd_fac ;
     dd_for[1][1][2] = wpar[26] * dd_fac ;
     emag += MYfabsf(wpar[24])+MYfabsf(wpar[25])+MYfabsf(wpar[26]) ;

     dd_for[1][2][0] = wpar[27] * dd_fac ;
     dd_for[1][2][1] = wpar[28] * dd_fac ;
     dd_for[1][2][2] = wpar[29] * dd_fac ;
     ddiag = ddiag && (wpar[27]==0.0f) && (wpar[28]==0.0f) && (wpar[29]==0.0f) ;
     dmag += MYfabsf(wpar[27])+MYfabsf(wpar[28])+MYfabsf(wpar[29]) ;

     dd_for[2][0][0] = wpar[30] * dd_fac ;
     dd_for[2][0][1] = wpar[31] * dd_fac ;
     dd_for[2][0][2] = wpar[32] * dd_fac ;
     ddiag = ddiag && (wpar[30]==0.0f) && (wpar[31]==0.0f) && (wpar[32]==0.0f) ;
     dmag += MYfabsf(wpar[30])+MYfabsf(wpar[31])+MYfabsf(wpar[32]) ;

     dd_for[2][1][0] = wpar[33] * dd_fac ;
     dd_for[2][1][1] = wpar[34] * dd_fac ;
     dd_for[2][1][2] = wpar[35] * dd_fac ;
     ddiag = ddiag && (wpar[33]==0.0f) && (wpar[34]==0.0f) && (wpar[35]==0.0f) ;
     dmag += MYfabsf(wpar[33])+MYfabsf(wpar[34])+MYfabsf(wpar[35]) ;
     dmag /= 18.0f ;  /* mean of the 18 off-diagonal magnitudes */

     dd_for[2][2][0] = wpar[36] * dd_fac ;
     dd_for[2][2][1] = wpar[37] * dd_fac ;
     dd_for[2][2][2] = wpar[38] * dd_fac ;
     emag += MYfabsf(wpar[36])+MYfabsf(wpar[37])+MYfabsf(wpar[38]) ;
     emag /= 9.0f ;   /* mean of the 9 diagonal magnitudes */

#if 0
INFO_message("bilinear warp %s diagonal: %.7g %.7g %.3g",
             ddiag ? "is" : "isn't" , emag , dmag , (emag>0.0f)?(dmag/emag):(-1.0f) ) ;
#endif
   }

   /* nothing to transform? */

   if( npt <= 0 || xi == NULL || xo == NULL ) return ;

   /* multiply matrix times input vectors */

 AFNI_OMP_START ;
#pragma omp parallel if( npt > 22222 )
 { int ii ; THD_mat33 dd,ee ; float aa,bb,cc , uu,vv,ww ;
#pragma omp for
   for( ii=0 ; ii < npt ; ii++ ){

     aa = xi[ii] ; bb = yi[ii] ; cc = zi[ii] ;

     if( aff_use_before ){
       MAT44_VEC( aff_before , aa,bb,cc , uu,vv,ww ) ;
     } else {
       uu = aa ; vv = bb ; ww = cc ;
     }
     uu -= xcen; vv -= ycen; ww -= zcen;  /* centered coords */

     /* compute denominator (dd) matrix from coordinates and parameters */

     dd.mat[0][0] = 1.0f + dd_for[0][0][0]*uu + dd_for[0][0][1]*vv + dd_for[0][0][2]*ww;
     dd.mat[1][1] = 1.0f + dd_for[1][1][0]*uu + dd_for[1][1][1]*vv + dd_for[1][1][2]*ww;
     dd.mat[2][2] = 1.0f + dd_for[2][2][0]*uu + dd_for[2][2][1]*vv + dd_for[2][2][2]*ww;
     if( !ddiag ){
       dd.mat[0][1] =      dd_for[0][1][0]*uu + dd_for[0][1][1]*vv + dd_for[0][1][2]*ww;
       dd.mat[0][2] =      dd_for[0][2][0]*uu + dd_for[0][2][1]*vv + dd_for[0][2][2]*ww;
       dd.mat[1][0] =      dd_for[1][0][0]*uu + dd_for[1][0][1]*vv + dd_for[1][0][2]*ww;
       dd.mat[1][2] =      dd_for[1][2][0]*uu + dd_for[1][2][1]*vv + dd_for[1][2][2]*ww;
       dd.mat[2][0] =      dd_for[2][0][0]*uu + dd_for[2][0][1]*vv + dd_for[2][0][2]*ww;
       dd.mat[2][1] =      dd_for[2][1][0]*uu + dd_for[2][1][1]*vv + dd_for[2][1][2]*ww;
       ee = MAT_INV(dd) ;
     } else {
       LOAD_DIAG_MAT( ee , 1.0f / dd.mat[0][0] ,  /* diagonal dd matrix case */
                           1.0f / dd.mat[1][1] ,
                           1.0f / dd.mat[2][2]  ) ;
     }

     MAT44_VEC( gam , aa,bb,cc, uu,vv,ww ) ;  /* affine part of transformation */

     /* apply inverse of denominator matrix to affinely transformed vector */

     if( !ddiag ){
       xo[ii] = ee.mat[0][0] * uu + ee.mat[0][1] * vv + ee.mat[0][2] * ww ;
       yo[ii] = ee.mat[1][0] * uu + ee.mat[1][1] * vv + ee.mat[1][2] * ww ;
       zo[ii] = ee.mat[2][0] * uu + ee.mat[2][1] * vv + ee.mat[2][2] * ww ;
     } else {
       xo[ii] = ee.mat[0][0] * uu ;  /* diagonal dd matrix case */
       yo[ii] = ee.mat[1][1] * vv ;
       zo[ii] = ee.mat[2][2] * ww ;
     }

   } /* end of loop over input points */
 }
 AFNI_OMP_END ;

   return ;
}
2762 
2763 /*--------------------------------------------------------------------------*/
2764 
2765 /* Legendre polynomials */
2766 
2767 #define LP1(x) (x)
2768 #define LP2(x) ((x)*(x)-0.3333333f)
2769 #define LP3(x) (((x)*(x)-0.6f)*(x))*1.5f
2770 #define LP4(x) ((x)*(x)*((x)*(x)-0.857143f)+0.0857143f)*2.0f
2771 #define LP5(x) (((x)*(x)*((x)*(x)-1.11111f)+0.238095f)*(x))*3.0f
2772 #define LP6(x) ((x)*(x)*((x)*(x)*((x)*(x)-1.36364f)+0.454545f)-0.021645f)*6.0f
2773 #define LP7(x) (((x)*(x)*((x)*(x)*((x)*(x)-1.61538f)+0.734266f)-0.081585f)*(x))*10.0f
2774 
2775 #define LP8(x) ( (x)*(x) * \
2776                ( (x)*(x) * \
2777                ( (x)*(x) * \
2778                ( (x)*(x) - 1.86667f ) + 1.07692f ) - 0.195804f ) + 0.0054390f )*18.0f
2779 
2780 #define LP9(x) ( ( (x)*(x) * \
2781                  ( (x)*(x) * \
2782                  ( (x)*(x) * \
2783                  ( (x)*(x) - 2.11765f ) + 1.48235f ) - 0.380090f ) + 0.0259153f ) * (x) )*32.0f
2784 
2785 /* Gegenbauer (alpha=-0.5) polynomials */
2786 
2787 #define GP1(x) (x)
2788 #define GP2(x) (0.1666667f-0.5f*(x)*(x))              /* G2(x)-1/3 : orthogonal to 1 */
2789 #define GP3(x) ((0.3f-0.5f*(x)*(x))*(x))              /* G3(x)-x/5 : orthogonal to x */
2790 #define GP4(x) (-0.125f+(0.75f-0.625f*(x)*(x))*(x)*(x))
2791 #define GP5(x) ((-0.375f+(1.25f-0.875f*(x)*(x))*(x)*(x))*(x))
2792 #define GP6(x) (0.0625f+(-0.9375f+(2.1875f-1.3125f*(x)*(x))*(x)*(x))*(x)*(x))
2793 #define GP7(x) ((0.3125f+(-2.1875f+(3.9375f-2.0625f*(x)*(x))*(x)*(x))*(x)*(x))*(x))
2794 #define GP8(x) (-0.0390625f+(1.09375f+(-4.921875f+(7.21875f-3.3515625f*(x)*(x))*(x)*(x))*(x)*(x))*(x)*(x))
2795 #define GP9(x) ((-0.2734375f+(3.28125f+(-10.828125f+(13.40625f-5.5859375f*(x)*(x))*(x)*(x))*(x)*(x))*(x)*(x))*(x))
2796 
2797 /* Hermite polynomials */
2798 
2799 #define HP1(x) (x)
2800 #define HP2(x) 0.4f*((x)*(x)-2.0f)
2801 #define HP3(x) 0.2f*((x)*(2.0f*(x)*(x)-3.0f))
2802 #define HP4(x) 0.05f*((4.0f*(x)*(x)-12.0f)*(x)*(x)+3.0f)
2803 #define HP5(x) 0.15f*((x)*((4.0f*(x)*(x)-20.0f)*(x)*(x)+15.0f))
2804 #define HP6(x) 0.2f*(((2.0f*(x)*(x)-15.0f)*(x)*(x)+22.5f)*(x)*(x)-3.75f)
2805 #define HP7(x) 0.06f*(0.5f*(x)*(((8.0f*(x)*(x)-84.0f)*(x)*(x)+210.0f)*(x)*(x)-105.0f))
2806 #define HP8(x) 0.1f*(((((x)*(x)-14.0f)*(x)*(x)+52.5f)*(x)*(x)-52.5f)*(x)*(x)+6.5625f)
2807 #define HP9(x) 0.004f*((x)*((((16.0f*(x)*(x)-288.0f)*(x)*(x)+1512.0f)*(x)*(x)-2520.0f)*(x)*(x)+945.0f))
2808 
2809 #define HH1(x) (x)
2810 #define HH2(x) HP2(3.0f*(x))*exp(-2.0f*(x)*(x))
2811 #define HH3(x) HP3(3.0f*(x))*exp(-3.0f*(x)*(x))
2812 #define HH4(x) HP4(3.0f*(x))*exp(-5.0f*(x)*(x))
2813 #define HH5(x) HP5(3.0f*(x))*exp(-6.0f*(x)*(x))
2814 #define HH6(x) HP6(3.0f*(x))*exp(-6.0f*(x)*(x))
2815 #define HH7(x) HP7(3.0f*(x))*exp(-7.0f*(x)*(x))
2816 #define HH8(x) HP8(3.0f*(x))*exp(-7.0f*(x)*(x))
2817 #define HH9(x) HP9(3.0f*(x))*exp(-7.0f*(x)*(x))
2818 
2819 #undef USE_GEGEN
2820 #undef USE_HERMITE
2821 
2822 /*----------------------------------------------------------------------------*/
2823 #define PP1(x) (x)
2824 
/* Function wrappers around the polynomial macros [28 Mar 2013]:
   the PPn(x) were switched from macros to functions so they can be
   stored in the PPn function pointers below. */

static float LEG2(float q){ return LP2(q); }  /* Legendre */
static float LEG3(float q){ return LP3(q); }
static float LEG4(float q){ return LP4(q); }
static float LEG5(float q){ return LP5(q); }
static float LEG6(float q){ return LP6(q); }
static float LEG7(float q){ return LP7(q); }
static float LEG8(float q){ return LP8(q); }
static float LEG9(float q){ return LP9(q); }

static float HER2(float q){ return HH2(q); }  /* damped Hermite */
static float HER3(float q){ return HH3(q); }
static float HER4(float q){ return HH4(q); }
static float HER5(float q){ return HH5(q); }
static float HER6(float q){ return HH6(q); }
static float HER7(float q){ return HH7(q); }
static float HER8(float q){ return HH8(q); }
static float HER9(float q){ return HH9(q); }
2842 
2843 static float (*PP2)(float) = LEG2 ;
2844 static float (*PP3)(float) = LEG3 ;
2845 static float (*PP4)(float) = LEG4 ;
2846 static float (*PP5)(float) = LEG5 ;
2847 static float (*PP6)(float) = LEG6 ;
2848 static float (*PP7)(float) = LEG7 ;
2849 static float (*PP8)(float) = LEG8 ;
2850 static float (*PP9)(float) = LEG9 ;
2851 
GA_setup_polywarp(int pcode)2852 void GA_setup_polywarp( int pcode )  /* 28 Mar 2013 */
2853 {
2854    switch( pcode ){
2855      default:
2856        PP2 = LEG2 ; PP3 = LEG3 ; PP4 = LEG4 ; PP5 = LEG5 ;
2857        PP6 = LEG6 ; PP7 = LEG7 ; PP8 = LEG8 ; PP9 = LEG9 ;
2858      break ;
2859 
2860      case GA_HERMITE:
2861        PP2 = HER2 ; PP3 = HER3 ; PP4 = HER4 ; PP5 = HER5 ;
2862        PP6 = HER6 ; PP7 = HER7 ; PP8 = HER8 ; PP9 = HER9 ;
2863      break ;
2864    }
2865    return ;
2866 }
2867 
2868 /*********************************/
2869 #if 0
2870 #ifdef USE_GEGEN
2871 # define PP1 GP1
2872 # define PP2 GP2
2873 # define PP3 GP3
2874 # define PP4 GP4
2875 # define PP5 GP5
2876 # define PP6 GP6
2877 # define PP7 GP7
2878 # define PP8 GP8
2879 # define PP9 GP9
2880 #endif
2881 #ifdef USE_HERMITE
2882 # define PP1 HH1
2883 # define PP2 HH2
2884 # define PP3 HH3
2885 # define PP4 HH4
2886 # define PP5 HH5
2887 # define PP6 HH6
2888 # define PP7 HH7
2889 # define PP8 HH8
2890 # define PP9 HH9
2891 #else
2892 # define PP1 LP1
2893 # define PP2 LP2
2894 # define PP3 LP3
2895 # define PP4 LP4
2896 # define PP5 LP5
2897 # define PP6 LP6
2898 # define PP7 LP7
2899 # define PP8 LP8
2900 # define PP9 LP9
2901 #endif
2902 #endif
2903 /*********************************/
2904 
2905 /* 3D product functions of various orders 2..9 */
2906 
2907 #define P2_xx(x,y,z) PP2(x)
2908 #define P2_xy(x,y,z) PP1(x)*PP1(y)
2909 #define P2_xz(x,y,z) PP1(x)*PP1(z)
2910 #define P2_yy(x,y,z) PP2(y)
2911 #define P2_yz(x,y,z) PP1(y)*PP1(z)
2912 #define P2_zz(x,y,z) PP2(z)
2913 
2914 #define P3_xxx(x,y,z) PP3(x)
2915 #define P3_xxy(x,y,z) PP2(x)*PP1(y)
2916 #define P3_xxz(x,y,z) PP2(x)*PP1(z)
2917 #define P3_xyy(x,y,z) PP1(x)*PP2(y)
2918 #define P3_xzz(x,y,z) PP1(x)*PP2(z)
2919 #define P3_xyz(x,y,z) PP1(x)*PP1(y)*PP1(z)
2920 #define P3_yyy(x,y,z) PP3(y)
2921 #define P3_yyz(x,y,z) PP2(y)*PP1(z)
2922 #define P3_yzz(x,y,z) PP1(y)*PP2(z)
2923 #define P3_zzz(x,y,z) PP3(z)
2924 
2925 #define P4_xxxx(x,y,z) PP4(x)
2926 #define P4_xxxy(x,y,z) PP3(x)*PP1(y)
2927 #define P4_xxxz(x,y,z) PP3(x)*PP1(z)
2928 #define P4_xxyy(x,y,z) PP2(x)*PP2(y)
2929 #define P4_xxzz(x,y,z) PP2(x)*PP2(z)
2930 #define P4_xxyz(x,y,z) PP2(x)*PP1(y)*PP1(z)
2931 #define P4_xyyy(x,y,z) PP1(x)*PP3(y)
2932 #define P4_xyyz(x,y,z) PP1(x)*PP2(y)*PP1(z)
2933 #define P4_xyzz(x,y,z) PP1(x)*PP1(y)*PP2(z)
2934 #define P4_xzzz(x,y,z) PP1(x)*PP3(z)
2935 #define P4_yyyy(x,y,z) PP4(y)
2936 #define P4_yyyz(x,y,z) PP3(y)*PP1(z)
2937 #define P4_yyzz(x,y,z) PP2(y)*PP2(z)
2938 #define P4_yzzz(x,y,z) PP1(y)*PP3(z)
2939 #define P4_zzzz(x,y,z) PP4(z)
2940 
2941 #define P5_xxxxx(x,y,z) PP5(x)
2942 #define P5_xxxxy(x,y,z) PP4(x)*PP1(y)
2943 #define P5_xxxxz(x,y,z) PP4(x)*PP1(z)
2944 #define P5_xxxyy(x,y,z) PP3(x)*PP2(y)
2945 #define P5_xxxzz(x,y,z) PP3(x)*PP2(z)
2946 #define P5_xxxyz(x,y,z) PP3(x)*PP1(y)*PP1(z)
2947 #define P5_xxyyy(x,y,z) PP2(x)*PP3(y)
2948 #define P5_xxyyz(x,y,z) PP2(x)*PP2(y)*PP1(z)
2949 #define P5_xxyzz(x,y,z) PP2(x)*PP1(y)*PP2(z)
2950 #define P5_xxzzz(x,y,z) PP2(x)*PP3(z)
2951 #define P5_xyyyy(x,y,z) PP1(x)*PP4(y)
2952 #define P5_xyyyz(x,y,z) PP1(x)*PP3(y)*PP1(z)
2953 #define P5_xyyzz(x,y,z) PP1(x)*PP2(y)*PP2(z)
2954 #define P5_xyzzz(x,y,z) PP1(x)*PP1(y)*PP3(z)
2955 #define P5_xzzzz(x,y,z) PP1(x)*PP4(z)
2956 #define P5_yyyyy(x,y,z) PP5(y)
2957 #define P5_yyyyz(x,y,z) PP4(y)*PP1(z)
2958 #define P5_yyyzz(x,y,z) PP3(y)*PP2(z)
2959 #define P5_yyzzz(x,y,z) PP2(y)*PP3(z)
2960 #define P5_yzzzz(x,y,z) PP1(y)*PP4(z)
2961 #define P5_zzzzz(x,y,z) PP5(z)
2962 
2963 #define P6_xxxxxx(x,y,z) PP6(x)
2964 #define P6_xxxxxy(x,y,z) PP5(x)*PP1(y)
2965 #define P6_xxxxxz(x,y,z) PP5(x)*PP1(z)
2966 #define P6_xxxxyy(x,y,z) PP4(x)*PP2(y)
2967 #define P6_xxxxzz(x,y,z) PP4(x)*PP2(z)
2968 #define P6_xxxxyz(x,y,z) PP4(x)*PP1(y)*PP1(z)
2969 #define P6_xxxyyy(x,y,z) PP3(x)*PP3(y)
2970 #define P6_xxxyyz(x,y,z) PP3(x)*PP2(y)*PP1(z)
2971 #define P6_xxxyzz(x,y,z) PP3(x)*PP1(y)*PP2(z)
2972 #define P6_xxxzzz(x,y,z) PP3(x)*PP3(z)
2973 #define P6_xxyyyy(x,y,z) PP2(x)*PP4(y)
2974 #define P6_xxyyyz(x,y,z) PP2(x)*PP3(y)*PP1(z)
2975 #define P6_xxyyzz(x,y,z) PP2(x)*PP2(y)*PP2(z)
2976 #define P6_xxyzzz(x,y,z) PP2(x)*PP1(y)*PP3(z)
2977 #define P6_xxzzzz(x,y,z) PP2(x)*PP4(z)
2978 #define P6_xyyyyy(x,y,z) PP1(x)*PP5(y)
2979 #define P6_xyyyyz(x,y,z) PP1(x)*PP4(y)*PP1(z)
2980 #define P6_xyyyzz(x,y,z) PP1(x)*PP3(y)*PP2(z)
2981 #define P6_xyyzzz(x,y,z) PP1(x)*PP2(y)*PP3(z)
2982 #define P6_xyzzzz(x,y,z) PP1(x)*PP1(y)*PP4(z)
2983 #define P6_xzzzzz(x,y,z) PP1(x)*PP5(z)
2984 #define P6_yyyyyy(x,y,z) PP6(y)
2985 #define P6_yyyyyz(x,y,z) PP5(y)*PP1(z)
2986 #define P6_yyyyzz(x,y,z) PP4(y)*PP2(z)
2987 #define P6_yyyzzz(x,y,z) PP3(y)*PP3(z)
2988 #define P6_yyzzzz(x,y,z) PP2(y)*PP4(z)
/* Tail of the order-6 basis-product table, then the order-7,8,9 tables.
   Each Pk_<monomial> macro is the product of the per-axis polynomials
   PPn() whose orders sum to k -- the letters in the macro name give the
   multiplicity of each axis (e.g. P7_xxxxxyz = PP5(x)*PP1(y)*PP1(z)). */

#define P6_yzzzzz(x,y,z) PP1(y)*PP5(z)
#define P6_zzzzzz(x,y,z) PP6(z)

/* order-7 basis products (36 monomials in x,y,z) */

#define P7_xxxxxxx(x,y,z) PP7(x)
#define P7_xxxxxxy(x,y,z) PP6(x)*PP1(y)
#define P7_xxxxxxz(x,y,z) PP6(x)*PP1(z)
#define P7_xxxxxyy(x,y,z) PP5(x)*PP2(y)
#define P7_xxxxxzz(x,y,z) PP5(x)*PP2(z)
#define P7_xxxxxyz(x,y,z) PP5(x)*PP1(y)*PP1(z)
#define P7_xxxxyyy(x,y,z) PP4(x)*PP3(y)
#define P7_xxxxyyz(x,y,z) PP4(x)*PP2(y)*PP1(z)
#define P7_xxxxyzz(x,y,z) PP4(x)*PP1(y)*PP2(z)
#define P7_xxxxzzz(x,y,z) PP4(x)*PP3(z)
#define P7_xxxyyyy(x,y,z) PP3(x)*PP4(y)
#define P7_xxxyyyz(x,y,z) PP3(x)*PP3(y)*PP1(z)
#define P7_xxxyyzz(x,y,z) PP3(x)*PP2(y)*PP2(z)
#define P7_xxxyzzz(x,y,z) PP3(x)*PP1(y)*PP3(z)
#define P7_xxxzzzz(x,y,z) PP3(x)*PP4(z)
#define P7_xxyyyyy(x,y,z) PP2(x)*PP5(y)
#define P7_xxyyyyz(x,y,z) PP2(x)*PP4(y)*PP1(z)
#define P7_xxyyyzz(x,y,z) PP2(x)*PP3(y)*PP2(z)
#define P7_xxyyzzz(x,y,z) PP2(x)*PP2(y)*PP3(z)
#define P7_xxyzzzz(x,y,z) PP2(x)*PP1(y)*PP4(z)
#define P7_xxzzzzz(x,y,z) PP2(x)*PP5(z)
#define P7_xyyyyyy(x,y,z) PP1(x)*PP6(y)
#define P7_xyyyyyz(x,y,z) PP1(x)*PP5(y)*PP1(z)
#define P7_xyyyyzz(x,y,z) PP1(x)*PP4(y)*PP2(z)
#define P7_xyyyzzz(x,y,z) PP1(x)*PP3(y)*PP3(z)
#define P7_xyyzzzz(x,y,z) PP1(x)*PP2(y)*PP4(z)
#define P7_xyzzzzz(x,y,z) PP1(x)*PP1(y)*PP5(z)
#define P7_xzzzzzz(x,y,z) PP1(x)*PP6(z)
#define P7_yyyyyyy(x,y,z) PP7(y)
#define P7_yyyyyyz(x,y,z) PP6(y)*PP1(z)
#define P7_yyyyyzz(x,y,z) PP5(y)*PP2(z)
#define P7_yyyyzzz(x,y,z) PP4(y)*PP3(z)
#define P7_yyyzzzz(x,y,z) PP3(y)*PP4(z)
#define P7_yyzzzzz(x,y,z) PP2(y)*PP5(z)
#define P7_yzzzzzz(x,y,z) PP1(y)*PP6(z)
#define P7_zzzzzzz(x,y,z) PP7(z)

/* order-8 basis products (45 monomials in x,y,z) */

#define P8_xxxxxxxx(x,y,z) PP8(x)
#define P8_xxxxxxxy(x,y,z) PP7(x)*PP1(y)
#define P8_xxxxxxxz(x,y,z) PP7(x)*PP1(z)
#define P8_xxxxxxyy(x,y,z) PP6(x)*PP2(y)
#define P8_xxxxxxzz(x,y,z) PP6(x)*PP2(z)
#define P8_xxxxxxyz(x,y,z) PP6(x)*PP1(y)*PP1(z)
#define P8_xxxxxyyy(x,y,z) PP5(x)*PP3(y)
#define P8_xxxxxyyz(x,y,z) PP5(x)*PP2(y)*PP1(z)
#define P8_xxxxxyzz(x,y,z) PP5(x)*PP1(y)*PP2(z)
#define P8_xxxxxzzz(x,y,z) PP5(x)*PP3(z)
#define P8_xxxxyyyy(x,y,z) PP4(x)*PP4(y)
#define P8_xxxxyyyz(x,y,z) PP4(x)*PP3(y)*PP1(z)
#define P8_xxxxyyzz(x,y,z) PP4(x)*PP2(y)*PP2(z)
#define P8_xxxxyzzz(x,y,z) PP4(x)*PP1(y)*PP3(z)
#define P8_xxxxzzzz(x,y,z) PP4(x)*PP4(z)
#define P8_xxxyyyyy(x,y,z) PP3(x)*PP5(y)
#define P8_xxxyyyyz(x,y,z) PP3(x)*PP4(y)*PP1(z)
#define P8_xxxyyyzz(x,y,z) PP3(x)*PP3(y)*PP2(z)
#define P8_xxxyyzzz(x,y,z) PP3(x)*PP2(y)*PP3(z)
#define P8_xxxyzzzz(x,y,z) PP3(x)*PP1(y)*PP4(z)
#define P8_xxxzzzzz(x,y,z) PP3(x)*PP5(z)
#define P8_xxyyyyyy(x,y,z) PP2(x)*PP6(y)
#define P8_xxyyyyyz(x,y,z) PP2(x)*PP5(y)*PP1(z)
#define P8_xxyyyyzz(x,y,z) PP2(x)*PP4(y)*PP2(z)
#define P8_xxyyyzzz(x,y,z) PP2(x)*PP3(y)*PP3(z)
#define P8_xxyyzzzz(x,y,z) PP2(x)*PP2(y)*PP4(z)
#define P8_xxyzzzzz(x,y,z) PP2(x)*PP1(y)*PP5(z)
#define P8_xxzzzzzz(x,y,z) PP2(x)*PP6(z)
#define P8_xyyyyyyy(x,y,z) PP1(x)*PP7(y)
#define P8_xyyyyyyz(x,y,z) PP1(x)*PP6(y)*PP1(z)
#define P8_xyyyyyzz(x,y,z) PP1(x)*PP5(y)*PP2(z)
#define P8_xyyyyzzz(x,y,z) PP1(x)*PP4(y)*PP3(z)
#define P8_xyyyzzzz(x,y,z) PP1(x)*PP3(y)*PP4(z)
#define P8_xyyzzzzz(x,y,z) PP1(x)*PP2(y)*PP5(z)
#define P8_xyzzzzzz(x,y,z) PP1(x)*PP1(y)*PP6(z)
#define P8_xzzzzzzz(x,y,z) PP1(x)*PP7(z)
#define P8_yyyyyyyy(x,y,z) PP8(y)
#define P8_yyyyyyyz(x,y,z) PP7(y)*PP1(z)
#define P8_yyyyyyzz(x,y,z) PP6(y)*PP2(z)
#define P8_yyyyyzzz(x,y,z) PP5(y)*PP3(z)
#define P8_yyyyzzzz(x,y,z) PP4(y)*PP4(z)
#define P8_yyyzzzzz(x,y,z) PP3(y)*PP5(z)
#define P8_yyzzzzzz(x,y,z) PP2(y)*PP6(z)
#define P8_yzzzzzzz(x,y,z) PP1(y)*PP7(z)
#define P8_zzzzzzzz(x,y,z) PP8(z)

/* order-9 basis products (55 monomials in x,y,z) */

#define P9_xxxxxxxxx(x,y,z) PP9(x)
#define P9_xxxxxxxxy(x,y,z) PP8(x)*PP1(y)
#define P9_xxxxxxxxz(x,y,z) PP8(x)*PP1(z)
#define P9_xxxxxxxyy(x,y,z) PP7(x)*PP2(y)
#define P9_xxxxxxxzz(x,y,z) PP7(x)*PP2(z)
#define P9_xxxxxxxyz(x,y,z) PP7(x)*PP1(y)*PP1(z)
#define P9_xxxxxxyyy(x,y,z) PP6(x)*PP3(y)
#define P9_xxxxxxyyz(x,y,z) PP6(x)*PP2(y)*PP1(z)
#define P9_xxxxxxyzz(x,y,z) PP6(x)*PP1(y)*PP2(z)
#define P9_xxxxxxzzz(x,y,z) PP6(x)*PP3(z)
#define P9_xxxxxyyyy(x,y,z) PP5(x)*PP4(y)
#define P9_xxxxxyyyz(x,y,z) PP5(x)*PP3(y)*PP1(z)
#define P9_xxxxxyyzz(x,y,z) PP5(x)*PP2(y)*PP2(z)
#define P9_xxxxxyzzz(x,y,z) PP5(x)*PP1(y)*PP3(z)
#define P9_xxxxxzzzz(x,y,z) PP5(x)*PP4(z)
#define P9_xxxxyyyyy(x,y,z) PP4(x)*PP5(y)
#define P9_xxxxyyyyz(x,y,z) PP4(x)*PP4(y)*PP1(z)
#define P9_xxxxyyyzz(x,y,z) PP4(x)*PP3(y)*PP2(z)
#define P9_xxxxyyzzz(x,y,z) PP4(x)*PP2(y)*PP3(z)
#define P9_xxxxyzzzz(x,y,z) PP4(x)*PP1(y)*PP4(z)
#define P9_xxxxzzzzz(x,y,z) PP4(x)*PP5(z)
#define P9_xxxyyyyyy(x,y,z) PP3(x)*PP6(y)
#define P9_xxxyyyyyz(x,y,z) PP3(x)*PP5(y)*PP1(z)
#define P9_xxxyyyyzz(x,y,z) PP3(x)*PP4(y)*PP2(z)
#define P9_xxxyyyzzz(x,y,z) PP3(x)*PP3(y)*PP3(z)
#define P9_xxxyyzzzz(x,y,z) PP3(x)*PP2(y)*PP4(z)
#define P9_xxxyzzzzz(x,y,z) PP3(x)*PP1(y)*PP5(z)
#define P9_xxxzzzzzz(x,y,z) PP3(x)*PP6(z)
#define P9_xxyyyyyyy(x,y,z) PP2(x)*PP7(y)
#define P9_xxyyyyyyz(x,y,z) PP2(x)*PP6(y)*PP1(z)
#define P9_xxyyyyyzz(x,y,z) PP2(x)*PP5(y)*PP2(z)
#define P9_xxyyyyzzz(x,y,z) PP2(x)*PP4(y)*PP3(z)
#define P9_xxyyyzzzz(x,y,z) PP2(x)*PP3(y)*PP4(z)
#define P9_xxyyzzzzz(x,y,z) PP2(x)*PP2(y)*PP5(z)
#define P9_xxyzzzzzz(x,y,z) PP2(x)*PP1(y)*PP6(z)
#define P9_xxzzzzzzz(x,y,z) PP2(x)*PP7(z)
#define P9_xyyyyyyyy(x,y,z) PP1(x)*PP8(y)
#define P9_xyyyyyyyz(x,y,z) PP1(x)*PP7(y)*PP1(z)
#define P9_xyyyyyyzz(x,y,z) PP1(x)*PP6(y)*PP2(z)
#define P9_xyyyyyzzz(x,y,z) PP1(x)*PP5(y)*PP3(z)
#define P9_xyyyyzzzz(x,y,z) PP1(x)*PP4(y)*PP4(z)
#define P9_xyyyzzzzz(x,y,z) PP1(x)*PP3(y)*PP5(z)
#define P9_xyyzzzzzz(x,y,z) PP1(x)*PP2(y)*PP6(z)
#define P9_xyzzzzzzz(x,y,z) PP1(x)*PP1(y)*PP7(z)
#define P9_xzzzzzzzz(x,y,z) PP1(x)*PP8(z)
#define P9_yyyyyyyyy(x,y,z) PP9(y)
#define P9_yyyyyyyyz(x,y,z) PP8(y)*PP1(z)
#define P9_yyyyyyyzz(x,y,z) PP7(y)*PP2(z)
#define P9_yyyyyyzzz(x,y,z) PP6(y)*PP3(z)
#define P9_yyyyyzzzz(x,y,z) PP5(y)*PP4(z)
#define P9_yyyyzzzzz(x,y,z) PP4(y)*PP5(z)
#define P9_yyyzzzzzz(x,y,z) PP3(y)*PP6(z)
#define P9_yyzzzzzzz(x,y,z) PP2(y)*PP7(z)
#define P9_yzzzzzzzz(x,y,z) PP1(y)*PP8(z)
#define P9_zzzzzzzzz(x,y,z) PP9(z)
3130 
/* number of polynomials in each set; number of params is times 3 (for x,y,z) */

#define NPOLCUBI   16  /* = 6+10 */
#define NPOLQUIN   52  /* = 6+10+15+21 */
#define NPOLHEPT  116  /* = 6+10+15+21+28+36 */
#define NPOLNONI  216  /* = 6+10+15+21+28+36+45+55 */

/* number of nonlinear terms at order k (in each dimension) */

#define NPOL(k) (((k)+1)*((k)+2)*((k)+3)/6-4)

/* FIXYZ macro makes sure arg is between -1 and 1 */
/* PRAMP/NRAMP softly compress values beyond +/-0.8 toward the limit,
   so the polynomial arguments never leave the [-1,1] fitting domain;
   FIXYZ expands to an if/else statement, so it must be used as a
   statement (as in the warp functions below), not as an expression.   */

#define PRAMP(x) ( 0.8f + ((x)-0.8f) / (1.0f + 5.0f*((x)-0.8f)) )
#define NRAMP(x) (-0.8f + ((x)+0.8f) / (1.0f - 5.0f*((x)+0.8f)) )
#define FIXYZ(q) if(q > 0.8f) q=PRAMP(q); else if(q < -0.8f) q=NRAMP(q)


/*--------------------------------------------------------------------------*/
/* coordinate-dependence codes: bitmask with x=1, y=2, z=4;
   the combination codes are bitwise ORs (e.g. CCxy = CCx|CCy = 3) */
#define CCx   1
#define CCy   2
#define CCz   4
#define CCxy  3
#define CCxz  5
#define CCyz  6
#define CCxyz 7
3157 
/* For each of the NPOLNONI polynomial basis functions (same ordering as
   PolyFuncName below), the CC bitmask saying which of x,y,z the
   polynomial actually depends on (e.g. x^2*y ==> CCxy).               */
static byte CCoordCCode[NPOLNONI] = {
  CCx, CCxy, CCxz, CCy, CCyz, CCz, CCx, CCxy, CCxz, CCxy, CCxz, CCxyz,
  CCy, CCyz, CCyz, CCz, CCx, CCxy, CCxz, CCxy, CCxz, CCxyz, CCxy, CCxyz,
  CCxyz, CCxz, CCy, CCyz, CCyz, CCyz, CCz, CCx, CCxy, CCxz, CCxy, CCxz, CCxyz,
  CCxy, CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxz, CCy, CCyz, CCyz, CCyz,
  CCyz, CCz, CCx, CCxy, CCxz, CCxy, CCxz, CCxyz, CCxy, CCxyz, CCxyz, CCxz,
  CCxy, CCxyz, CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxyz, CCxz, CCy,
  CCyz, CCyz, CCyz, CCyz, CCyz, CCz, CCx, CCxy, CCxz, CCxy, CCxz, CCxyz,
  CCxy, CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz,
  CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxyz, CCxyz, CCxz, CCy, CCyz,
  CCyz, CCyz, CCyz, CCyz, CCyz, CCz, CCx, CCxy, CCxz, CCxy, CCxz, CCxyz,
  CCxy, CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz,
  CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxyz, CCxyz, CCxz, CCxy, CCxyz,
  CCxyz, CCxyz, CCxyz, CCxyz, CCxyz, CCxz, CCy, CCyz, CCyz, CCyz, CCyz, CCyz,
  CCyz, CCyz, CCz, CCx, CCxy, CCxz, CCxy, CCxz, CCxyz, CCxy, CCxyz, CCxyz,
  CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxyz, CCxz,
  CCxy, CCxyz, CCxyz, CCxyz, CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxyz,
  CCxyz, CCxyz, CCxz, CCxy, CCxyz, CCxyz, CCxyz, CCxyz, CCxyz, CCxyz, CCxyz, CCxz,
  CCy, CCyz, CCyz, CCyz, CCyz, CCyz, CCyz, CCyz, CCyz, CCz } ;
3177 
GA_polywarp_coordcode(int pnum)3178 int GA_polywarp_coordcode( int pnum )
3179 {
3180    if( pnum < 0 || pnum >= NPOLNONI ) return 0 ;
3181    return (int)CCoordCCode[pnum] ;
3182 }
3183 
/* Human-readable monomial name for each of the NPOLNONI polynomial basis
   functions, in the same ordering used by CCoordCCode and the pv[] arrays
   in the warp functions below (orders 2..9, x-major within each order).  */
static char *PolyFuncName[NPOLNONI] = {
  "x^2" , "x*y" , "x*z" , "y^2" , "y*z" , "z^2" , "x^3" , "x^2*y" ,
  "x^2*z" , "x*y^2" , "x*z^2" , "x*y*z" , "y^3" , "y^2*z" , "y*z^2" , "z^3" ,
  "x^4" , "x^3*y" , "x^3*z" , "x^2*y^2" , "x^2*z^2" , "x^2*y*z" , "x*y^3" , "x*y^2*z" ,
  "x*y*z^2" , "x*z^3" , "y^4" , "y^3*z" , "y^2*z^2" , "y*z^3" , "z^4" , "x^5" , "x^4*y" ,
  "x^4*z" , "x^3*y^2" , "x^3*z^2" , "x^3*y*z" , "x^2*y^3" , "x^2*y^2*z" , "x^2*y*z^2" , "x^2*z^3" ,
  "x*y^4" , "x*y^3*z" , "x*y^2*z^2" , "x*y*z^3" , "x*z^4" , "y^5" , "y^4*z" , "y^3*z^2" ,
  "y^2*z^3" , "y*z^4" , "z^5" , "x^6" , "x^5*y" , "x^5*z" , "x^4*y^2" , "x^4*z^2" ,
  "x^4*y*z" , "x^3*y^3" , "x^3*y^2*z" , "x^3*y*z^2" , "x^3*z^3" , "x^2*y^4" , "x^2*y^3*z" , "x^2*y^2*z^2" ,
  "x^2*y*z^3" , "x^2*z^4" , "x*y^5" , "x*y^4*z" , "x*y^3*z^2" , "x*y^2*z^3" , "x*y*z^4" , "x*z^5" ,
  "y^6" , "y^5*z" , "y^4*z^2" , "y^3*z^3" , "y^2*z^4" , "y*z^5" , "z^6" , "x^7" , "x^6*y" ,
  "x^6*z" , "x^5*y^2" , "x^5*z^2" , "x^5*y*z" , "x^4*y^3" , "x^4*y^2*z" , "x^4*y*z^2" , "x^4*z^3" ,
  "x^3*y^4" , "x^3*y^3*z" , "x^3*y^2*z^2" , "x^3*y*z^3" , "x^3*z^4" , "x^2*y^5" , "x^2*y^4*z" , "x^2*y^3*z^2" ,
  "x^2*y^2*z^3" , "x^2*y*z^4" , "x^2*z^5" , "x*y^6" , "x*y^5*z" , "x*y^4*z^2" , "x*y^3*z^3" , "x*y^2*z^4" ,
  "x*y*z^5" , "x*z^6" , "y^7" , "y^6*z" , "y^5*z^2" , "y^4*z^3" , "y^3*z^4" , "y^2*z^5" ,
  "y*z^6" , "z^7" , "x^8" , "x^7*y" , "x^7*z" , "x^6*y^2" , "x^6*z^2" , "x^6*y*z" ,
  "x^5*y^3" , "x^5*y^2*z" , "x^5*y*z^2" , "x^5*z^3" , "x^4*y^4" , "x^4*y^3*z" , "x^4*y^2*z^2" , "x^4*y*z^3" ,
  "x^4*z^4" , "x^3*y^5" , "x^3*y^4*z" , "x^3*y^3*z^2" , "x^3*y^2*z^3" , "x^3*y*z^4" , "x^3*z^5" , "x^2*y^6" ,
  "x^2*y^5*z" , "x^2*y^4*z^2" , "x^2*y^3*z^3" , "x^2*y^2*z^4" , "x^2*y*z^5" , "x^2*z^6" , "x*y^7" , "x*y^6*z" ,
  "x*y^5*z^2" , "x*y^4*z^3" , "x*y^3*z^4" , "x*y^2*z^5" , "x*y*z^6" , "x*z^7" , "y^8" , "y^7*z" ,
  "y^6*z^2" , "y^5*z^3" , "y^4*z^4" , "y^3*z^5" , "y^2*z^6" , "y*z^7" , "z^8" , "x^9" , "x^8*y" , "x^8*z" ,
  "x^7*y^2" , "x^7*z^2" , "x^7*y*z" , "x^6*y^3" , "x^6*y^2*z" , "x^6*y*z^2" , "x^6*z^3" ,
  "x^5*y^4" , "x^5*y^3*z" , "x^5*y^2*z^2" , "x^5*y*z^3" , "x^5*z^4" , "x^4*y^5" , "x^4*y^4*z" , "x^4*y^3*z^2" ,
  "x^4*y^2*z^3" , "x^4*y*z^4" , "x^4*z^5" , "x^3*y^6" , "x^3*y^5*z" , "x^3*y^4*z^2" ,
  "x^3*y^3*z^3" , "x^3*y^2*z^4" , "x^3*y*z^5" , "x^3*z^6" , "x^2*y^7" , "x^2*y^6*z" , "x^2*y^5*z^2" ,
  "x^2*y^4*z^3" , "x^2*y^3*z^4" , "x^2*y^2*z^5" , "x^2*y*z^6" , "x^2*z^7" , "x*y^8" , "x*y^7*z" ,
  "x*y^6*z^2" , "x*y^5*z^3" , "x*y^4*z^4" , "x*y^3*z^5" , "x*y^2*z^6" , "x*y*z^7" , "x*z^8" ,
  "y^9" , "y^8*z" , "y^7*z^2" , "y^6*z^3" , "y^5*z^4" , "y^4*z^5" , "y^3*z^6" , "y^2*z^7" , "y*z^8" , "z^9" } ;
3212 
GA_polywarp_funcname(int pnum)3213 char * GA_polywarp_funcname( int pnum )
3214 {
3215    if( pnum < 0 || pnum >= NPOLNONI ) return NULL ;
3216    return PolyFuncName[pnum] ;
3217 }
3218 
3219 /*--------------------------------------------------------------------------*/
3220 /*! A wfunc function for cubic polynomials. */
3221 
/*! Warp npt points (xi,yi,zi) to (xo,yo,zo) using an affine transform
    plus cubic-polynomial displacements.
    - Setup call: when npar >= 3*NPOLCUBI+16 and wpar != NULL, the
      parameters are cached in the static variables below:
        wpar[0..11]                = affine parameters (GA_setup_affine)
        wpar[12..12+3*NPOLCUBI-1]  = polynomial coefficients (x,y,z triples)
        wpar[12+3*NPOLCUBI..+3]    = xcen,ycen,zcen,xyzfac (fixed params)
    - Transform call: with npt > 0 and non-NULL xi/xo, applies the cached
      warp; a setup call with npt <= 0 does no point transformation.     */

void mri_genalign_cubic( int npar, float *wpar ,
                         int npt , float *xi, float *yi, float *zi ,
                                   float *xo, float *yo, float *zo  )
{
   static mat44 gam ;  /* saved general affine matrix */
   /* cached center, scale (and its inverse), and scaled poly coefficients */
   static float xcen,ycen,zcen,xyzfac,xyzinv , ppar[3*NPOLCUBI] ;
   static int puse[NPOLCUBI] , pall ;  /* per-term usage flags; all-used flag */

   /** new parameters ==> setup matrix */

   if( npar >= 3*NPOLCUBI+16 && wpar != NULL ){
     int aa=aff_use_after , ab=aff_use_before , jj ;

     xcen   = wpar[12+3*NPOLCUBI] ;  /* the fake (non-varying) parameters */
     ycen   = wpar[13+3*NPOLCUBI] ;
     zcen   = wpar[14+3*NPOLCUBI] ;
     xyzfac = wpar[15+3*NPOLCUBI] ; xyzinv = 1.0f / xyzfac ;

     /* temporarily disable before/after matrices so GA_setup_affine
        builds only the pure affine part; restore the flags afterwards */
     aff_use_before = aff_use_after = 0;
     gam = GA_setup_affine( 12 , wpar ) ;  /* affine param setup */
     aff_use_before = ab; aff_use_after = aa;

     for( jj=0 ; jj < 3*NPOLCUBI ; jj++ )          /* save polynomial params */
       ppar[jj] = wpar[jj+12] * xyzinv ;
     for( pall=jj=0 ; jj < NPOLCUBI ; jj++ ){      /* mark which ones to use */
       puse[jj] = (ppar[3*jj  ] != 0.0f) ||
                  (ppar[3*jj+1] != 0.0f) || (ppar[3*jj+2] != 0.0f) ;
       pall += puse[jj] ;
     }
     /* pall = 1 if at least 90% of terms are in use ==> skip per-term tests */
     pall = ( pall >= (int)(0.9f*NPOLCUBI) ) ;

#if 0
     if( AFNI_yesenv("ALLIN_DEBUG") ){
       fprintf(stderr,"++ cubic params: xyz_cen=%.4g,%.4g,%.4g fac=%.4g:",
               xcen,ycen,zcen,xyzfac) ;
       for( jj=0 ; jj < 60 ; jj++ )
         fprintf(stderr,"%s%.4g",(jj==12||jj==30)?" | ":" ",wpar[jj]) ;
       fprintf(stderr,"\n") ;
     }
#endif

   }

   /* nothing to transform? (a setup call) */

   if( npt <= 0 || xi == NULL || xo == NULL ) return ;

   /*--- do some work ---*/

 AFNI_OMP_START ;
#pragma omp parallel if( npt > 6666 )
 { int ii,jj,kk ; float aa,bb,cc , uu,vv,ww , pv[NPOLCUBI] ;
   memset( pv , 0 , sizeof(float)*NPOLCUBI ) ;  /* per-thread scratch */
#pragma omp for
   for( ii=0 ; ii < npt ; ii++ ){

     aa = xi[ii] ; bb = yi[ii] ; cc = zi[ii] ;  /* input indexes/coords */

     if( aff_use_before ){             /* convert to 'real' coordinates */
       MAT44_VEC( aff_before , aa,bb,cc , uu,vv,ww ) ;
     } else {
       uu = aa ; vv = bb ; ww = cc ;
     }
     MAT44_VEC( gam , uu,vv,ww, aa,bb,cc ) ;             /* affine part */

     /* centered and scaled to run from -1..1 */

     uu = (uu-xcen)*xyzfac ; vv = (vv-ycen)*xyzfac ; ww = (ww-zcen)*xyzfac ;
     FIXYZ(uu) ; FIXYZ(vv) ; FIXYZ(ww) ;

     /* polynomials: evaluate all 16 terms (pall), or only the nonzero
        ones (puse[]), and add the coefficient-weighted displacements   */

     if( pall ){
       pv[ 0] = P2_xx (uu,vv,ww) ; pv[ 1] = P2_xy (uu,vv,ww) ;
       pv[ 2] = P2_xz (uu,vv,ww) ; pv[ 3] = P2_yy (uu,vv,ww) ;
       pv[ 4] = P2_yz (uu,vv,ww) ; pv[ 5] = P2_zz (uu,vv,ww) ;
       pv[ 6] = P3_xxx(uu,vv,ww) ; pv[ 7] = P3_xxy(uu,vv,ww) ;
       pv[ 8] = P3_xxz(uu,vv,ww) ; pv[ 9] = P3_xyy(uu,vv,ww) ;
       pv[10] = P3_xzz(uu,vv,ww) ; pv[11] = P3_xyz(uu,vv,ww) ;
       pv[12] = P3_yyy(uu,vv,ww) ; pv[13] = P3_yyz(uu,vv,ww) ;
       pv[14] = P3_yzz(uu,vv,ww) ; pv[15] = P3_zzz(uu,vv,ww) ;
       for( kk=jj=0 ; jj < NPOLCUBI ; jj++,kk+=3 ){
         aa += ppar[kk  ] * pv[jj] ;
         bb += ppar[kk+1] * pv[jj] ;
         cc += ppar[kk+2] * pv[jj] ;
       }
     } else {
       if( puse[ 0] ) pv[ 0] = P2_xx (uu,vv,ww) ;
       if( puse[ 1] ) pv[ 1] = P2_xy (uu,vv,ww) ;
       if( puse[ 2] ) pv[ 2] = P2_xz (uu,vv,ww) ;
       if( puse[ 3] ) pv[ 3] = P2_yy (uu,vv,ww) ;
       if( puse[ 4] ) pv[ 4] = P2_yz (uu,vv,ww) ;
       if( puse[ 5] ) pv[ 5] = P2_zz (uu,vv,ww) ;
       if( puse[ 6] ) pv[ 6] = P3_xxx(uu,vv,ww) ;
       if( puse[ 7] ) pv[ 7] = P3_xxy(uu,vv,ww) ;
       if( puse[ 8] ) pv[ 8] = P3_xxz(uu,vv,ww) ;
       if( puse[ 9] ) pv[ 9] = P3_xyy(uu,vv,ww) ;
       if( puse[10] ) pv[10] = P3_xzz(uu,vv,ww) ;
       if( puse[11] ) pv[11] = P3_xyz(uu,vv,ww) ;
       if( puse[12] ) pv[12] = P3_yyy(uu,vv,ww) ;
       if( puse[13] ) pv[13] = P3_yyz(uu,vv,ww) ;
       if( puse[14] ) pv[14] = P3_yzz(uu,vv,ww) ;
       if( puse[15] ) pv[15] = P3_zzz(uu,vv,ww) ;
       for( kk=jj=0 ; jj < NPOLCUBI ; jj++,kk+=3 ){
         if( puse[jj] ){
           aa += ppar[kk  ] * pv[jj] ;
           bb += ppar[kk+1] * pv[jj] ;
           cc += ppar[kk+2] * pv[jj] ;
         }
       }
     }

     if( aff_use_after ){                    /* convert back to indexes */
       MAT44_VEC( aff_after , aa,bb,cc , xo[ii],yo[ii],zo[ii] ) ;
     } else {
       xo[ii] = aa ; yo[ii] = bb ; zo[ii] = cc ;
     }

   } /* end of loop over input points */
 }
 AFNI_OMP_END ;

   return ;
}
3346 
3347 /*--------------------------------------------------------------------------*/
3348 /*! A wfunc function for quintic polynomials. */
3349 
/*! Warp npt points (xi,yi,zi) to (xo,yo,zo) using an affine transform
    plus quintic-polynomial displacements.
    Parameter layout parallels the cubic case:
      wpar[0..11]                = affine parameters (GA_setup_affine)
      wpar[12..12+3*NPOLQUIN-1]  = polynomial coefficients (x,y,z triples)
      wpar[12+3*NPOLQUIN..+3]    = xcen,ycen,zcen,xyzfac (fixed params)
    A call with npar >= 3*NPOLQUIN+16 and wpar != NULL caches the setup
    in static variables; a call with npt > 0 applies the cached warp.   */

void mri_genalign_quintic( int npar, float *wpar ,
                           int npt , float *xi, float *yi, float *zi ,
                                     float *xo, float *yo, float *zo  )
{
   static mat44 gam ;  /* saved general affine matrix */
   /* cached center, scale (and its inverse), and scaled poly coefficients */
   static float xcen,ycen,zcen,xyzfac,xyzinv , ppar[3*NPOLQUIN] ;
   static int puse[NPOLQUIN] , pall ;  /* per-term usage flags; all-used flag */

   /** new parameters ==> setup matrix */

   if( npar >= 3*NPOLQUIN+16 && wpar != NULL ){
     int aa=aff_use_after , ab=aff_use_before , jj ;

     xcen   = wpar[12+3*NPOLQUIN] ;  /* the fake (non-varying) parameters */
     ycen   = wpar[13+3*NPOLQUIN] ;
     zcen   = wpar[14+3*NPOLQUIN] ;
     xyzfac = wpar[15+3*NPOLQUIN] ; xyzinv = 1.0f / xyzfac ;

     /* temporarily disable before/after matrices so GA_setup_affine
        builds only the pure affine part; restore the flags afterwards */
     aff_use_before = aff_use_after = 0;
     gam = GA_setup_affine( 12 , wpar ) ;  /* affine param setup */
     aff_use_before = ab; aff_use_after = aa;

     for( jj=0 ; jj < 3*NPOLQUIN ; jj++ )          /* save polynomial params */
       ppar[jj] = wpar[jj+12] * xyzinv ;
     for( pall=jj=0 ; jj < NPOLQUIN ; jj++ ){      /* mark which ones to use */
       puse[jj] = (ppar[3*jj  ] != 0.0f) ||
                  (ppar[3*jj+1] != 0.0f) || (ppar[3*jj+2] != 0.0f) ;
       pall += puse[jj] ;
     }
     /* pall = 1 if at least 90% of terms are in use ==> skip per-term tests */
     pall = ( pall >= (int)(0.9f*NPOLQUIN) ) ;

#if 0
     if( AFNI_yesenv("ALLIN_DEBUG") ){
       fprintf(stderr,"++ quintic params: xyz_cen=%.4g,%.4g,%.4g fac=%.4g:",
               xcen,ycen,zcen,xyzfac) ;
       for( jj=0 ; jj < 168 ; jj++ )
         fprintf(stderr,"%s%.4g",(jj==12||jj==30||jj==60||jj==105)?" | ":" ",wpar[jj]) ;
       fprintf(stderr,"\n") ;
     }
#endif

   }

   /* nothing to transform? (a setup call) */

   if( npt <= 0 || xi == NULL || xo == NULL ) return ;

   /*--- do some work ---*/

 AFNI_OMP_START ;
#pragma omp parallel if( npt > 5555 )
 { int ii,jj,kk ; float aa,bb,cc , uu,vv,ww , pv[NPOLQUIN] ;
#pragma omp for
   for( ii=0 ; ii < npt ; ii++ ){

     aa = xi[ii] ; bb = yi[ii] ; cc = zi[ii] ;  /* input indexes/coords */

     if( aff_use_before ){             /* convert to 'real' coordinates */
       MAT44_VEC( aff_before , aa,bb,cc , uu,vv,ww ) ;
     } else {
       uu = aa ; vv = bb ; ww = cc ;
     }
     MAT44_VEC( gam , uu,vv,ww, aa,bb,cc ) ;             /* affine part */

     /* centered and scaled to run from -1..1 */

     uu = (uu-xcen)*xyzfac ; vv = (vv-ycen)*xyzfac ; ww = (ww-zcen)*xyzfac ;
     FIXYZ(uu) ; FIXYZ(vv) ; FIXYZ(ww) ;

     /* polynomials */

     if( pall ){
       /* all terms in use: compute the per-axis polynomials once, then
          build every product via the Q* macros below (which reuse them) */
       float p1x,p2x,p3x,p4x,p5x , p1y,p2y,p3y,p4y,p5y , p1z,p2z,p3z,p4z,p5z ;
       p1x = PP1(uu); p2x = PP2(uu); p3x = PP3(uu); p4x = PP4(uu); p5x = PP5(uu);
       p1y = PP1(vv); p2y = PP2(vv); p3y = PP3(vv); p4y = PP4(vv); p5y = PP5(vv);
       p1z = PP1(ww); p2z = PP2(ww); p3z = PP3(ww); p4z = PP4(ww); p5z = PP5(ww);

#define Q2_xx p2x
#define Q2_xy p1x*p1y
#define Q2_xz p1x*p1z
#define Q2_yy p2y
#define Q2_yz p1y*p1z
#define Q2_zz p2z
#define Q3_xxx p3x
#define Q3_xxy p2x*p1y
#define Q3_xxz p2x*p1z
#define Q3_xyy p1x*p2y
#define Q3_xzz p1x*p2z
#define Q3_xyz p1x*p1y*p1z
#define Q3_yyy p3y
#define Q3_yyz p2y*p1z
#define Q3_yzz p1y*p2z
#define Q3_zzz p3z
#define Q4_xxxx p4x
#define Q4_xxxy p3x*p1y
#define Q4_xxxz p3x*p1z
#define Q4_xxyy p2x*p2y
#define Q4_xxzz p2x*p2z
#define Q4_xxyz p2x*p1y*p1z
#define Q4_xyyy p1x*p3y
#define Q4_xyyz p1x*p2y*p1z
#define Q4_xyzz p1x*p1y*p2z
#define Q4_xzzz p1x*p3z
#define Q4_yyyy p4y
#define Q4_yyyz p3y*p1z
#define Q4_yyzz p2y*p2z
#define Q4_yzzz p1y*p3z
#define Q4_zzzz p4z
#define Q5_xxxxx p5x
#define Q5_xxxxy p4x*p1y
#define Q5_xxxxz p4x*p1z
#define Q5_xxxyy p3x*p2y
#define Q5_xxxzz p3x*p2z
#define Q5_xxxyz p3x*p1y*p1z
#define Q5_xxyyy p2x*p3y
#define Q5_xxyyz p2x*p2y*p1z
#define Q5_xxyzz p2x*p1y*p2z
#define Q5_xxzzz p2x*p3z
#define Q5_xyyyy p1x*p4y
#define Q5_xyyyz p1x*p3y*p1z
#define Q5_xyyzz p1x*p2y*p2z
#define Q5_xyzzz p1x*p1y*p3z
#define Q5_xzzzz p1x*p4z
#define Q5_yyyyy p5y
#define Q5_yyyyz p4y*p1z
#define Q5_yyyzz p3y*p2z
#define Q5_yyzzz p2y*p3z
#define Q5_yzzzz p1y*p4z
#define Q5_zzzzz p5z
       pv[ 0] = Q2_xx  ; pv[ 1] = Q2_xy  ; pv[ 2] = Q2_xz  ; pv[ 3] = Q2_yy  ;
       pv[ 4] = Q2_yz  ; pv[ 5] = Q2_zz  ; pv[ 6] = Q3_xxx ; pv[ 7] = Q3_xxy ;
       pv[ 8] = Q3_xxz ; pv[ 9] = Q3_xyy ; pv[10] = Q3_xzz ; pv[11] = Q3_xyz ;
       pv[12] = Q3_yyy ; pv[13] = Q3_yyz ; pv[14] = Q3_yzz ; pv[15] = Q3_zzz ;
       pv[16] = Q4_xxxx ; pv[17] = Q4_xxxy ; pv[18] = Q4_xxxz ; pv[19] = Q4_xxyy ;
       pv[20] = Q4_xxzz ; pv[21] = Q4_xxyz ; pv[22] = Q4_xyyy ; pv[23] = Q4_xyyz ;
       pv[24] = Q4_xyzz ; pv[25] = Q4_xzzz ; pv[26] = Q4_yyyy ; pv[27] = Q4_yyyz ;
       pv[28] = Q4_yyzz ; pv[29] = Q4_yzzz ; pv[30] = Q4_zzzz ; pv[31] = Q5_xxxxx ;
       pv[32] = Q5_xxxxy ; pv[33] = Q5_xxxxz ; pv[34] = Q5_xxxyy ; pv[35] = Q5_xxxzz ;
       pv[36] = Q5_xxxyz ; pv[37] = Q5_xxyyy ; pv[38] = Q5_xxyyz ; pv[39] = Q5_xxyzz ;
       pv[40] = Q5_xxzzz ; pv[41] = Q5_xyyyy ; pv[42] = Q5_xyyyz ; pv[43] = Q5_xyyzz ;
       pv[44] = Q5_xyzzz ; pv[45] = Q5_xzzzz ; pv[46] = Q5_yyyyy ; pv[47] = Q5_yyyyz ;
       pv[48] = Q5_yyyzz ; pv[49] = Q5_yyzzz ; pv[50] = Q5_yzzzz ; pv[51] = Q5_zzzzz ;
       for( kk=jj=0 ; jj < NPOLQUIN ; jj++,kk+=3 ){
         aa += ppar[kk  ] * pv[jj] ;
         bb += ppar[kk+1] * pv[jj] ;
         cc += ppar[kk+2] * pv[jj] ;
       }
     } else {
       /* sparse case: only evaluate the terms with nonzero coefficients */
       if( puse[ 0] ) pv[ 0] = P2_xx (uu,vv,ww) ;
       if( puse[ 1] ) pv[ 1] = P2_xy (uu,vv,ww) ;
       if( puse[ 2] ) pv[ 2] = P2_xz (uu,vv,ww) ;
       if( puse[ 3] ) pv[ 3] = P2_yy (uu,vv,ww) ;
       if( puse[ 4] ) pv[ 4] = P2_yz (uu,vv,ww) ;
       if( puse[ 5] ) pv[ 5] = P2_zz (uu,vv,ww) ;
       if( puse[ 6] ) pv[ 6] = P3_xxx(uu,vv,ww) ;
       if( puse[ 7] ) pv[ 7] = P3_xxy(uu,vv,ww) ;
       if( puse[ 8] ) pv[ 8] = P3_xxz(uu,vv,ww) ;
       if( puse[ 9] ) pv[ 9] = P3_xyy(uu,vv,ww) ;
       if( puse[10] ) pv[10] = P3_xzz(uu,vv,ww) ;
       if( puse[11] ) pv[11] = P3_xyz(uu,vv,ww) ;
       if( puse[12] ) pv[12] = P3_yyy(uu,vv,ww) ;
       if( puse[13] ) pv[13] = P3_yyz(uu,vv,ww) ;
       if( puse[14] ) pv[14] = P3_yzz(uu,vv,ww) ;
       if( puse[15] ) pv[15] = P3_zzz(uu,vv,ww) ;
       if( puse[16] ) pv[16] = P4_xxxx(uu,vv,ww) ;
       if( puse[17] ) pv[17] = P4_xxxy(uu,vv,ww) ;
       if( puse[18] ) pv[18] = P4_xxxz(uu,vv,ww) ;
       if( puse[19] ) pv[19] = P4_xxyy(uu,vv,ww) ;
       if( puse[20] ) pv[20] = P4_xxzz(uu,vv,ww) ;
       if( puse[21] ) pv[21] = P4_xxyz(uu,vv,ww) ;
       if( puse[22] ) pv[22] = P4_xyyy(uu,vv,ww) ;
       if( puse[23] ) pv[23] = P4_xyyz(uu,vv,ww) ;
       if( puse[24] ) pv[24] = P4_xyzz(uu,vv,ww) ;
       if( puse[25] ) pv[25] = P4_xzzz(uu,vv,ww) ;
       if( puse[26] ) pv[26] = P4_yyyy(uu,vv,ww) ;
       if( puse[27] ) pv[27] = P4_yyyz(uu,vv,ww) ;
       if( puse[28] ) pv[28] = P4_yyzz(uu,vv,ww) ;
       if( puse[29] ) pv[29] = P4_yzzz(uu,vv,ww) ;
       if( puse[30] ) pv[30] = P4_zzzz(uu,vv,ww) ;
       if( puse[31] ) pv[31] = P5_xxxxx(uu,vv,ww) ;
       if( puse[32] ) pv[32] = P5_xxxxy(uu,vv,ww) ;
       if( puse[33] ) pv[33] = P5_xxxxz(uu,vv,ww) ;
       if( puse[34] ) pv[34] = P5_xxxyy(uu,vv,ww) ;
       if( puse[35] ) pv[35] = P5_xxxzz(uu,vv,ww) ;
       if( puse[36] ) pv[36] = P5_xxxyz(uu,vv,ww) ;
       if( puse[37] ) pv[37] = P5_xxyyy(uu,vv,ww) ;
       if( puse[38] ) pv[38] = P5_xxyyz(uu,vv,ww) ;
       if( puse[39] ) pv[39] = P5_xxyzz(uu,vv,ww) ;
       if( puse[40] ) pv[40] = P5_xxzzz(uu,vv,ww) ;
       if( puse[41] ) pv[41] = P5_xyyyy(uu,vv,ww) ;
       if( puse[42] ) pv[42] = P5_xyyyz(uu,vv,ww) ;
       if( puse[43] ) pv[43] = P5_xyyzz(uu,vv,ww) ;
       if( puse[44] ) pv[44] = P5_xyzzz(uu,vv,ww) ;
       if( puse[45] ) pv[45] = P5_xzzzz(uu,vv,ww) ;
       if( puse[46] ) pv[46] = P5_yyyyy(uu,vv,ww) ;
       if( puse[47] ) pv[47] = P5_yyyyz(uu,vv,ww) ;
       if( puse[48] ) pv[48] = P5_yyyzz(uu,vv,ww) ;
       if( puse[49] ) pv[49] = P5_yyzzz(uu,vv,ww) ;
       if( puse[50] ) pv[50] = P5_yzzzz(uu,vv,ww) ;
       if( puse[51] ) pv[51] = P5_zzzzz(uu,vv,ww) ;
       for( kk=jj=0 ; jj < NPOLQUIN ; jj++,kk+=3 ){
         if( puse[jj] ){
           aa += ppar[kk  ] * pv[jj] ;
           bb += ppar[kk+1] * pv[jj] ;
           cc += ppar[kk+2] * pv[jj] ;
         }
       }
     }

     if( aff_use_after ){                    /* convert back to indexes */
       MAT44_VEC( aff_after , aa,bb,cc , xo[ii],yo[ii],zo[ii] ) ;
     } else {
       xo[ii] = aa ; yo[ii] = bb ; zo[ii] = cc ;
     }

   } /* end of loop over input points */
 }
 AFNI_OMP_END ;

   return ;
}
3571 
3572 /*--------------------------------------------------------------------------*/
3573 /*! A wfunc function for heptic polynomials. */
3574 
mri_genalign_heptic(int npar,float * wpar,int npt,float * xi,float * yi,float * zi,float * xo,float * yo,float * zo)3575 void mri_genalign_heptic( int npar, float *wpar ,
3576                           int npt , float *xi, float *yi, float *zi ,
3577                                     float *xo, float *yo, float *zo  )
3578 {
3579    static mat44 gam ;  /* saved general affine matrix */
3580    static float xcen,ycen,zcen,xyzfac,xyzinv , ppar[3*NPOLHEPT] ;
3581    static int puse[NPOLHEPT] , pall ;
3582 
3583    /** new parameters ==> setup matrix */
3584 
3585    if( npar >= 3*NPOLHEPT+16 && wpar != NULL ){
3586      int aa=aff_use_after , ab=aff_use_before , jj ;
3587 
3588      xcen   = wpar[12+3*NPOLHEPT] ;  /* the fake (non-varying) parameters */
3589      ycen   = wpar[13+3*NPOLHEPT] ;
3590      zcen   = wpar[14+3*NPOLHEPT] ;
3591      xyzfac = wpar[15+3*NPOLHEPT] ; xyzinv = 1.0f / xyzfac ;
3592 
3593      aff_use_before = aff_use_after = 0;
3594      gam = GA_setup_affine( 12 , wpar ) ;  /* affine param setup */
3595      aff_use_before = ab; aff_use_after = aa;
3596 
3597      for( jj=0 ; jj < 3*NPOLHEPT ; jj++ )          /* save polynomial params */
3598        ppar[jj] = wpar[jj+12] * xyzinv ;
3599      for( pall=jj=0 ; jj < NPOLHEPT ; jj++ ){      /* mark which ones to use */
3600        puse[jj] = (ppar[3*jj  ] != 0.0f) ||
3601                   (ppar[3*jj+1] != 0.0f) || (ppar[3*jj+2] != 0.0f) ;
3602        pall += puse[jj] ;
3603      }
3604      pall = ( pall >= (int)(0.9f*NPOLHEPT) ) ;
3605    }
3606 
3607    /* nothing to transform? (a setup call) */
3608 
3609    if( npt <= 0 || xi == NULL || xo == NULL ) return ;
3610 
3611    /*--- do some work ---*/
3612 
3613  AFNI_OMP_START ;
3614 #pragma omp parallel if( npt > 4444 )
3615  { int ii,jj,kk ; float aa,bb,cc , uu,vv,ww , pv[NPOLHEPT] ;
3616 #pragma omp for
3617    for( ii=0 ; ii < npt ; ii++ ){
3618 
3619      aa = xi[ii] ; bb = yi[ii] ; cc = zi[ii] ;  /* input indexes/coords */
3620 
3621      if( aff_use_before ){             /* convert to 'real' coordinates */
3622        MAT44_VEC( aff_before , aa,bb,cc , uu,vv,ww ) ;
3623      } else {
3624        uu = aa ; vv = bb ; ww = cc ;
3625      }
3626      MAT44_VEC( gam , uu,vv,ww, aa,bb,cc ) ;             /* affine part */
3627 
3628      /* centered and scaled to run from -1..1 */
3629 
3630      uu = (uu-xcen)*xyzfac ; vv = (vv-ycen)*xyzfac ; ww = (ww-zcen)*xyzfac ;
3631      FIXYZ(uu) ; FIXYZ(vv) ; FIXYZ(ww) ;
3632 
3633      /* polynomials */
3634 
3635      if( pall ){
3636        float p1x,p2x,p3x,p4x,p5x,p6x,p7x,
3637              p1y,p2y,p3y,p4y,p5y,p6y,p7y,
3638              p1z,p2z,p3z,p4z,p5z,p6z,p7z ;
3639        p1x=PP1(uu); p2x=PP2(uu); p3x=PP3(uu); p4x=PP4(uu); p5x=PP5(uu); p6x=PP6(uu); p7x=PP7(uu);
3640        p1y=PP1(vv); p2y=PP2(vv); p3y=PP3(vv); p4y=PP4(vv); p5y=PP5(vv); p6y=PP6(vv); p7y=PP7(vv);
3641        p1z=PP1(ww); p2z=PP2(ww); p3z=PP3(ww); p4z=PP4(ww); p5z=PP5(ww); p6z=PP6(ww); p7z=PP7(ww);
3642 
3643 #define Q6_xxxxxx p6x
3644 #define Q6_xxxxxy p5x*p1y
3645 #define Q6_xxxxxz p5x*p1z
3646 #define Q6_xxxxyy p4x*p2y
3647 #define Q6_xxxxzz p4x*p2z
3648 #define Q6_xxxxyz p4x*p1y*p1z
3649 #define Q6_xxxyyy p3x*p3y
3650 #define Q6_xxxyyz p3x*p2y*p1z
3651 #define Q6_xxxyzz p3x*p1y*p2z
3652 #define Q6_xxxzzz p3x*p3z
3653 #define Q6_xxyyyy p2x*p4y
3654 #define Q6_xxyyyz p2x*p3y*p1z
3655 #define Q6_xxyyzz p2x*p2y*p2z
3656 #define Q6_xxyzzz p2x*p1y*p3z
3657 #define Q6_xxzzzz p2x*p4z
3658 #define Q6_xyyyyy p1x*p5y
3659 #define Q6_xyyyyz p1x*p4y*p1z
3660 #define Q6_xyyyzz p1x*p3y*p2z
3661 #define Q6_xyyzzz p1x*p2y*p3z
3662 #define Q6_xyzzzz p1x*p1y*p4z
3663 #define Q6_xzzzzz p1x*p5z
3664 #define Q6_yyyyyy p6y
3665 #define Q6_yyyyyz p5y*p1z
3666 #define Q6_yyyyzz p4y*p2z
3667 #define Q6_yyyzzz p3y*p3z
3668 #define Q6_yyzzzz p2y*p4z
3669 #define Q6_yzzzzz p1y*p5z
3670 #define Q6_zzzzzz p6z
3671 #define Q7_xxxxxxx p7x
3672 #define Q7_xxxxxxy p6x*p1y
3673 #define Q7_xxxxxxz p6x*p1z
3674 #define Q7_xxxxxyy p5x*p2y
3675 #define Q7_xxxxxzz p5x*p2z
3676 #define Q7_xxxxxyz p5x*p1y*p1z
3677 #define Q7_xxxxyyy p4x*p3y
3678 #define Q7_xxxxyyz p4x*p2y*p1z
3679 #define Q7_xxxxyzz p4x*p1y*p2z
3680 #define Q7_xxxxzzz p4x*p3z
3681 #define Q7_xxxyyyy p3x*p4y
3682 #define Q7_xxxyyyz p3x*p3y*p1z
3683 #define Q7_xxxyyzz p3x*p2y*p2z
3684 #define Q7_xxxyzzz p3x*p1y*p3z
3685 #define Q7_xxxzzzz p3x*p4z
3686 #define Q7_xxyyyyy p2x*p5y
3687 #define Q7_xxyyyyz p2x*p4y*p1z
3688 #define Q7_xxyyyzz p2x*p3y*p2z
3689 #define Q7_xxyyzzz p2x*p2y*p3z
3690 #define Q7_xxyzzzz p2x*p1y*p4z
3691 #define Q7_xxzzzzz p2x*p5z
3692 #define Q7_xyyyyyy p1x*p6y
3693 #define Q7_xyyyyyz p1x*p5y*p1z
3694 #define Q7_xyyyyzz p1x*p4y*p2z
3695 #define Q7_xyyyzzz p1x*p3y*p3z
3696 #define Q7_xyyzzzz p1x*p2y*p4z
3697 #define Q7_xyzzzzz p1x*p1y*p5z
3698 #define Q7_xzzzzzz p1x*p6z
3699 #define Q7_yyyyyyy p7y
3700 #define Q7_yyyyyyz p6y*p1z
3701 #define Q7_yyyyyzz p5y*p2z
3702 #define Q7_yyyyzzz p4y*p3z
3703 #define Q7_yyyzzzz p3y*p4z
3704 #define Q7_yyzzzzz p2y*p5z
3705 #define Q7_yzzzzzz p1y*p6z
3706 #define Q7_zzzzzzz p7z
3707 
3708        pv[ 0] = Q2_xx  ; pv[ 1] = Q2_xy  ; pv[ 2] = Q2_xz  ; pv[ 3] = Q2_yy  ;
3709        pv[ 4] = Q2_yz  ; pv[ 5] = Q2_zz  ; pv[ 6] = Q3_xxx ; pv[ 7] = Q3_xxy ;
3710        pv[ 8] = Q3_xxz ; pv[ 9] = Q3_xyy ; pv[10] = Q3_xzz ; pv[11] = Q3_xyz ;
3711        pv[12] = Q3_yyy ; pv[13] = Q3_yyz ; pv[14] = Q3_yzz ; pv[15] = Q3_zzz ;
3712        pv[16] = Q4_xxxx ; pv[17] = Q4_xxxy ; pv[18] = Q4_xxxz ; pv[19] = Q4_xxyy ;
3713        pv[20] = Q4_xxzz ; pv[21] = Q4_xxyz ; pv[22] = Q4_xyyy ; pv[23] = Q4_xyyz ;
3714        pv[24] = Q4_xyzz ; pv[25] = Q4_xzzz ; pv[26] = Q4_yyyy ; pv[27] = Q4_yyyz ;
3715        pv[28] = Q4_yyzz ; pv[29] = Q4_yzzz ; pv[30] = Q4_zzzz ; pv[31] = Q5_xxxxx ;
3716        pv[32] = Q5_xxxxy ; pv[33] = Q5_xxxxz ; pv[34] = Q5_xxxyy ; pv[35] = Q5_xxxzz ;
3717        pv[36] = Q5_xxxyz ; pv[37] = Q5_xxyyy ; pv[38] = Q5_xxyyz ; pv[39] = Q5_xxyzz ;
3718        pv[40] = Q5_xxzzz ; pv[41] = Q5_xyyyy ; pv[42] = Q5_xyyyz ; pv[43] = Q5_xyyzz ;
3719        pv[44] = Q5_xyzzz ; pv[45] = Q5_xzzzz ; pv[46] = Q5_yyyyy ; pv[47] = Q5_yyyyz ;
3720        pv[48] = Q5_yyyzz ; pv[49] = Q5_yyzzz ; pv[50] = Q5_yzzzz ; pv[51] = Q5_zzzzz ;
3721        kk = 52 ;
3722        pv[kk++] = Q6_xxxxxx ; pv[kk++] = Q6_xxxxxy ; pv[kk++] = Q6_xxxxxz ;
3723        pv[kk++] = Q6_xxxxyy ; pv[kk++] = Q6_xxxxzz ; pv[kk++] = Q6_xxxxyz ;
3724        pv[kk++] = Q6_xxxyyy ; pv[kk++] = Q6_xxxyyz ; pv[kk++] = Q6_xxxyzz ;
3725        pv[kk++] = Q6_xxxzzz ; pv[kk++] = Q6_xxyyyy ; pv[kk++] = Q6_xxyyyz ;
3726        pv[kk++] = Q6_xxyyzz ; pv[kk++] = Q6_xxyzzz ; pv[kk++] = Q6_xxzzzz ;
3727        pv[kk++] = Q6_xyyyyy ; pv[kk++] = Q6_xyyyyz ; pv[kk++] = Q6_xyyyzz ;
3728        pv[kk++] = Q6_xyyzzz ; pv[kk++] = Q6_xyzzzz ; pv[kk++] = Q6_xzzzzz ;
3729        pv[kk++] = Q6_yyyyyy ; pv[kk++] = Q6_yyyyyz ; pv[kk++] = Q6_yyyyzz ;
3730        pv[kk++] = Q6_yyyzzz ; pv[kk++] = Q6_yyzzzz ; pv[kk++] = Q6_yzzzzz ;
3731        pv[kk++] = Q6_zzzzzz ;
3732        pv[kk++] = Q7_xxxxxxx ; pv[kk++] = Q7_xxxxxxy ; pv[kk++] = Q7_xxxxxxz ;
3733        pv[kk++] = Q7_xxxxxyy ; pv[kk++] = Q7_xxxxxzz ; pv[kk++] = Q7_xxxxxyz ;
3734        pv[kk++] = Q7_xxxxyyy ; pv[kk++] = Q7_xxxxyyz ; pv[kk++] = Q7_xxxxyzz ;
3735        pv[kk++] = Q7_xxxxzzz ; pv[kk++] = Q7_xxxyyyy ; pv[kk++] = Q7_xxxyyyz ;
3736        pv[kk++] = Q7_xxxyyzz ; pv[kk++] = Q7_xxxyzzz ; pv[kk++] = Q7_xxxzzzz ;
3737        pv[kk++] = Q7_xxyyyyy ; pv[kk++] = Q7_xxyyyyz ; pv[kk++] = Q7_xxyyyzz ;
3738        pv[kk++] = Q7_xxyyzzz ; pv[kk++] = Q7_xxyzzzz ; pv[kk++] = Q7_xxzzzzz ;
3739        pv[kk++] = Q7_xyyyyyy ; pv[kk++] = Q7_xyyyyyz ; pv[kk++] = Q7_xyyyyzz ;
3740        pv[kk++] = Q7_xyyyzzz ; pv[kk++] = Q7_xyyzzzz ; pv[kk++] = Q7_xyzzzzz ;
3741        pv[kk++] = Q7_xzzzzzz ; pv[kk++] = Q7_yyyyyyy ; pv[kk++] = Q7_yyyyyyz ;
3742        pv[kk++] = Q7_yyyyyzz ; pv[kk++] = Q7_yyyyzzz ; pv[kk++] = Q7_yyyzzzz ;
3743        pv[kk++] = Q7_yyzzzzz ; pv[kk++] = Q7_yzzzzzz ; pv[kk++] = Q7_zzzzzzz ;
3744 
3745        for( kk=jj=0 ; jj < NPOLHEPT ; jj++,kk+=3 ){
3746          aa += ppar[kk  ] * pv[jj] ;
3747          bb += ppar[kk+1] * pv[jj] ;
3748          cc += ppar[kk+2] * pv[jj] ;
3749        }
3750      } else {
3751        if( puse[ 0] ) pv[ 0] = P2_xx (uu,vv,ww) ;
3752        if( puse[ 1] ) pv[ 1] = P2_xy (uu,vv,ww) ;
3753        if( puse[ 2] ) pv[ 2] = P2_xz (uu,vv,ww) ;
3754        if( puse[ 3] ) pv[ 3] = P2_yy (uu,vv,ww) ;
3755        if( puse[ 4] ) pv[ 4] = P2_yz (uu,vv,ww) ;
3756        if( puse[ 5] ) pv[ 5] = P2_zz (uu,vv,ww) ;
3757        if( puse[ 6] ) pv[ 6] = P3_xxx(uu,vv,ww) ;
3758        if( puse[ 7] ) pv[ 7] = P3_xxy(uu,vv,ww) ;
3759        if( puse[ 8] ) pv[ 8] = P3_xxz(uu,vv,ww) ;
3760        if( puse[ 9] ) pv[ 9] = P3_xyy(uu,vv,ww) ;
3761        if( puse[10] ) pv[10] = P3_xzz(uu,vv,ww) ;
3762        if( puse[11] ) pv[11] = P3_xyz(uu,vv,ww) ;
3763        if( puse[12] ) pv[12] = P3_yyy(uu,vv,ww) ;
3764        if( puse[13] ) pv[13] = P3_yyz(uu,vv,ww) ;
3765        if( puse[14] ) pv[14] = P3_yzz(uu,vv,ww) ;
3766        if( puse[15] ) pv[15] = P3_zzz(uu,vv,ww) ;
3767        if( puse[16] ) pv[16] = P4_xxxx(uu,vv,ww) ;
3768        if( puse[17] ) pv[17] = P4_xxxy(uu,vv,ww) ;
3769        if( puse[18] ) pv[18] = P4_xxxz(uu,vv,ww) ;
3770        if( puse[19] ) pv[19] = P4_xxyy(uu,vv,ww) ;
3771        if( puse[20] ) pv[20] = P4_xxzz(uu,vv,ww) ;
3772        if( puse[21] ) pv[21] = P4_xxyz(uu,vv,ww) ;
3773        if( puse[22] ) pv[22] = P4_xyyy(uu,vv,ww) ;
3774        if( puse[23] ) pv[23] = P4_xyyz(uu,vv,ww) ;
3775        if( puse[24] ) pv[24] = P4_xyzz(uu,vv,ww) ;
3776        if( puse[25] ) pv[25] = P4_xzzz(uu,vv,ww) ;
3777        if( puse[26] ) pv[26] = P4_yyyy(uu,vv,ww) ;
3778        if( puse[27] ) pv[27] = P4_yyyz(uu,vv,ww) ;
3779        if( puse[28] ) pv[28] = P4_yyzz(uu,vv,ww) ;
3780        if( puse[29] ) pv[29] = P4_yzzz(uu,vv,ww) ;
3781        if( puse[30] ) pv[30] = P4_zzzz(uu,vv,ww) ;
3782        if( puse[31] ) pv[31] = P5_xxxxx(uu,vv,ww) ;
3783        if( puse[32] ) pv[32] = P5_xxxxy(uu,vv,ww) ;
3784        if( puse[33] ) pv[33] = P5_xxxxz(uu,vv,ww) ;
3785        if( puse[34] ) pv[34] = P5_xxxyy(uu,vv,ww) ;
3786        if( puse[35] ) pv[35] = P5_xxxzz(uu,vv,ww) ;
3787        if( puse[36] ) pv[36] = P5_xxxyz(uu,vv,ww) ;
3788        if( puse[37] ) pv[37] = P5_xxyyy(uu,vv,ww) ;
3789        if( puse[38] ) pv[38] = P5_xxyyz(uu,vv,ww) ;
3790        if( puse[39] ) pv[39] = P5_xxyzz(uu,vv,ww) ;
3791        if( puse[40] ) pv[40] = P5_xxzzz(uu,vv,ww) ;
3792        if( puse[41] ) pv[41] = P5_xyyyy(uu,vv,ww) ;
3793        if( puse[42] ) pv[42] = P5_xyyyz(uu,vv,ww) ;
3794        if( puse[43] ) pv[43] = P5_xyyzz(uu,vv,ww) ;
3795        if( puse[44] ) pv[44] = P5_xyzzz(uu,vv,ww) ;
3796        if( puse[45] ) pv[45] = P5_xzzzz(uu,vv,ww) ;
3797        if( puse[46] ) pv[46] = P5_yyyyy(uu,vv,ww) ;
3798        if( puse[47] ) pv[47] = P5_yyyyz(uu,vv,ww) ;
3799        if( puse[48] ) pv[48] = P5_yyyzz(uu,vv,ww) ;
3800        if( puse[49] ) pv[49] = P5_yyzzz(uu,vv,ww) ;
3801        if( puse[50] ) pv[50] = P5_yzzzz(uu,vv,ww) ;
3802        if( puse[51] ) pv[51] = P5_zzzzz(uu,vv,ww) ;      kk = 51 ;
3803        if( puse[++kk] ) pv[kk] = P6_xxxxxx(uu,vv,ww) ;
3804        if( puse[++kk] ) pv[kk] = P6_xxxxxy(uu,vv,ww) ;
3805        if( puse[++kk] ) pv[kk] = P6_xxxxxz(uu,vv,ww) ;
3806        if( puse[++kk] ) pv[kk] = P6_xxxxyy(uu,vv,ww) ;
3807        if( puse[++kk] ) pv[kk] = P6_xxxxzz(uu,vv,ww) ;
3808        if( puse[++kk] ) pv[kk] = P6_xxxxyz(uu,vv,ww) ;
3809        if( puse[++kk] ) pv[kk] = P6_xxxyyy(uu,vv,ww) ;
3810        if( puse[++kk] ) pv[kk] = P6_xxxyyz(uu,vv,ww) ;
3811        if( puse[++kk] ) pv[kk] = P6_xxxyzz(uu,vv,ww) ;
3812        if( puse[++kk] ) pv[kk] = P6_xxxzzz(uu,vv,ww) ;
3813        if( puse[++kk] ) pv[kk] = P6_xxyyyy(uu,vv,ww) ;
3814        if( puse[++kk] ) pv[kk] = P6_xxyyyz(uu,vv,ww) ;
3815        if( puse[++kk] ) pv[kk] = P6_xxyyzz(uu,vv,ww) ;
3816        if( puse[++kk] ) pv[kk] = P6_xxyzzz(uu,vv,ww) ;
3817        if( puse[++kk] ) pv[kk] = P6_xxzzzz(uu,vv,ww) ;
3818        if( puse[++kk] ) pv[kk] = P6_xyyyyy(uu,vv,ww) ;
3819        if( puse[++kk] ) pv[kk] = P6_xyyyyz(uu,vv,ww) ;
3820        if( puse[++kk] ) pv[kk] = P6_xyyyzz(uu,vv,ww) ;
3821        if( puse[++kk] ) pv[kk] = P6_xyyzzz(uu,vv,ww) ;
3822        if( puse[++kk] ) pv[kk] = P6_xyzzzz(uu,vv,ww) ;
3823        if( puse[++kk] ) pv[kk] = P6_xzzzzz(uu,vv,ww) ;
3824        if( puse[++kk] ) pv[kk] = P6_yyyyyy(uu,vv,ww) ;
3825        if( puse[++kk] ) pv[kk] = P6_yyyyyz(uu,vv,ww) ;
3826        if( puse[++kk] ) pv[kk] = P6_yyyyzz(uu,vv,ww) ;
3827        if( puse[++kk] ) pv[kk] = P6_yyyzzz(uu,vv,ww) ;
3828        if( puse[++kk] ) pv[kk] = P6_yyzzzz(uu,vv,ww) ;
3829        if( puse[++kk] ) pv[kk] = P6_yzzzzz(uu,vv,ww) ;
3830        if( puse[++kk] ) pv[kk] = P6_zzzzzz(uu,vv,ww) ;
3831        if( puse[++kk] ) pv[kk] = P7_xxxxxxx(uu,vv,ww) ;
3832        if( puse[++kk] ) pv[kk] = P7_xxxxxxy(uu,vv,ww) ;
3833        if( puse[++kk] ) pv[kk] = P7_xxxxxxz(uu,vv,ww) ;
3834        if( puse[++kk] ) pv[kk] = P7_xxxxxyy(uu,vv,ww) ;
3835        if( puse[++kk] ) pv[kk] = P7_xxxxxzz(uu,vv,ww) ;
3836        if( puse[++kk] ) pv[kk] = P7_xxxxxyz(uu,vv,ww) ;
3837        if( puse[++kk] ) pv[kk] = P7_xxxxyyy(uu,vv,ww) ;
3838        if( puse[++kk] ) pv[kk] = P7_xxxxyyz(uu,vv,ww) ;
3839        if( puse[++kk] ) pv[kk] = P7_xxxxyzz(uu,vv,ww) ;
3840        if( puse[++kk] ) pv[kk] = P7_xxxxzzz(uu,vv,ww) ;
3841        if( puse[++kk] ) pv[kk] = P7_xxxyyyy(uu,vv,ww) ;
3842        if( puse[++kk] ) pv[kk] = P7_xxxyyyz(uu,vv,ww) ;
3843        if( puse[++kk] ) pv[kk] = P7_xxxyyzz(uu,vv,ww) ;
3844        if( puse[++kk] ) pv[kk] = P7_xxxyzzz(uu,vv,ww) ;
3845        if( puse[++kk] ) pv[kk] = P7_xxxzzzz(uu,vv,ww) ;
3846        if( puse[++kk] ) pv[kk] = P7_xxyyyyy(uu,vv,ww) ;
3847        if( puse[++kk] ) pv[kk] = P7_xxyyyyz(uu,vv,ww) ;
3848        if( puse[++kk] ) pv[kk] = P7_xxyyyzz(uu,vv,ww) ;
3849        if( puse[++kk] ) pv[kk] = P7_xxyyzzz(uu,vv,ww) ;
3850        if( puse[++kk] ) pv[kk] = P7_xxyzzzz(uu,vv,ww) ;
3851        if( puse[++kk] ) pv[kk] = P7_xxzzzzz(uu,vv,ww) ;
3852        if( puse[++kk] ) pv[kk] = P7_xyyyyyy(uu,vv,ww) ;
3853        if( puse[++kk] ) pv[kk] = P7_xyyyyyz(uu,vv,ww) ;
3854        if( puse[++kk] ) pv[kk] = P7_xyyyyzz(uu,vv,ww) ;
3855        if( puse[++kk] ) pv[kk] = P7_xyyyzzz(uu,vv,ww) ;
3856        if( puse[++kk] ) pv[kk] = P7_xyyzzzz(uu,vv,ww) ;
3857        if( puse[++kk] ) pv[kk] = P7_xyzzzzz(uu,vv,ww) ;
3858        if( puse[++kk] ) pv[kk] = P7_xzzzzzz(uu,vv,ww) ;
3859        if( puse[++kk] ) pv[kk] = P7_yyyyyyy(uu,vv,ww) ;
3860        if( puse[++kk] ) pv[kk] = P7_yyyyyyz(uu,vv,ww) ;
3861        if( puse[++kk] ) pv[kk] = P7_yyyyyzz(uu,vv,ww) ;
3862        if( puse[++kk] ) pv[kk] = P7_yyyyzzz(uu,vv,ww) ;
3863        if( puse[++kk] ) pv[kk] = P7_yyyzzzz(uu,vv,ww) ;
3864        if( puse[++kk] ) pv[kk] = P7_yyzzzzz(uu,vv,ww) ;
3865        if( puse[++kk] ) pv[kk] = P7_yzzzzzz(uu,vv,ww) ;
3866        if( puse[++kk] ) pv[kk] = P7_zzzzzzz(uu,vv,ww) ;
3867        for( kk=jj=0 ; jj < NPOLHEPT ; jj++,kk+=3 ){
3868          if( puse[jj] ){
3869            aa += ppar[kk  ] * pv[jj] ;
3870            bb += ppar[kk+1] * pv[jj] ;
3871            cc += ppar[kk+2] * pv[jj] ;
3872          }
3873        }
3874      }
3875 
3876      if( aff_use_after ){                    /* convert back to indexes */
3877        MAT44_VEC( aff_after , aa,bb,cc , xo[ii],yo[ii],zo[ii] ) ;
3878      } else {
3879        xo[ii] = aa ; yo[ii] = bb ; zo[ii] = cc ;
3880      }
3881 
3882    } /* end of loop over input points */
3883  }
3884  AFNI_OMP_END ;
3885 
3886 #if 0
3887  if( AFNI_yesenv("ALLIN_DEBUG") ){
3888    int ii ; float dd,dmax=0.0f ;
3889    static int ncall=0 ;
3890    for( ii=0 ; ii < npt ; ii++ ){
3891      dd = MYfabsf(xo[ii]-xi[ii]) + MYfabsf(yo[ii]-yi[ii]) + MYfabsf(zo[ii]-zi[ii]) ;
3892      if( dd > dmax ) dmax = dd ;
3893    }
3894    ncall++ ; ININFO_message("heptic %d: dmax = %.4g",ncall,dmax) ;
3895  }
3896 #endif
3897 
3898    return ;
3899 }
3900 
3901 /*--------------------------------------------------------------------------*/
3902 /*! A wfunc function for nonic (9th order) polynomials. */
3903 
mri_genalign_nonic(int npar,float * wpar,int npt,float * xi,float * yi,float * zi,float * xo,float * yo,float * zo)3904 void mri_genalign_nonic( int npar, float *wpar ,
3905                          int npt , float *xi, float *yi, float *zi ,
3906                                    float *xo, float *yo, float *zo  )
3907 {
3908    static mat44 gam ;  /* saved general affine matrix */
3909    static float xcen,ycen,zcen,xyzfac,xyzinv , ppar[3*NPOLNONI] ;
3910    static int puse[NPOLNONI] , pall , plast ;
3911 
3912 ENTRY("mri_genalign_nonic") ;
3913 
3914    /** new parameters ==> setup matrix */
3915 
3916    if( npar >= 3*NPOLNONI+16 && wpar != NULL ){
3917      int aa=aff_use_after , ab=aff_use_before , jj ;
3918 
3919 #if 0
3920 STATUS("setup params") ;
3921 #endif
3922 
3923      xcen   = wpar[12+3*NPOLNONI] ;  /* the fake (non-varying) parameters */
3924      ycen   = wpar[13+3*NPOLNONI] ;
3925      zcen   = wpar[14+3*NPOLNONI] ;
3926      xyzfac = wpar[15+3*NPOLNONI] ; xyzinv = 1.0f / xyzfac ;
3927 
3928      aff_use_before = aff_use_after = 0;
3929      gam = GA_setup_affine( 12 , wpar ) ;  /* affine param setup */
3930      aff_use_before = ab; aff_use_after = aa;
3931 
3932      for( jj=0 ; jj < 3*NPOLNONI ; jj++ )          /* save polynomial params */
3933        ppar[jj] = wpar[jj+12] * xyzinv ;
3934      for( pall=jj=0 ; jj < NPOLNONI ; jj++ ){      /* mark which ones to use */
3935        puse[jj] = (ppar[3*jj  ] != 0.0f) ||
3936                   (ppar[3*jj+1] != 0.0f) || (ppar[3*jj+2] != 0.0f) ;
3937        pall += puse[jj] ; if( puse[jj] ) plast = jj ;
3938      }
3939      pall = ( pall >= (int)(0.9f*NPOLNONI) ) ;
3940 
3941 #if 0
3942 STATUS("setup finished") ;
3943 #endif
3944    }
3945 
3946    /* nothing to transform? (a setup call) */
3947 
3948    if( npt <= 0 || xi == NULL || xo == NULL ) EXRETURN ;
3949 
3950    /*--- do some work ---*/
3951 
3952 #if 0
3953    if( pall ){ STATUS("use pall") ; }
3954    else {
3955      int jj ;
3956      STATUS("not pall") ;
3957      if( PRINT_TRACING ){
3958        fprintf(stderr,"   ppar:") ;
3959        for( jj=0 ; jj < 3*NPOLNONI ; jj++ )
3960          if( ppar[jj] != 0.0f ) fprintf(stderr," [%d]=%.5g",jj,ppar[jj]) ;
3961        fprintf(stderr,"\n") ;
3962      }
3963    }
3964 #endif
3965 
3966  AFNI_OMP_START ;
3967 #pragma omp parallel if( npt > 3333 )
3968  { int ii,jj,kk ; float aa,bb,cc , uu,vv,ww , pv[NPOLNONI] ;
3969 #pragma omp for
3970    for( ii=0 ; ii < npt ; ii++ ){
3971 
3972      aa = xi[ii] ; bb = yi[ii] ; cc = zi[ii] ;  /* input indexes/coords */
3973 
3974      if( aff_use_before ){             /* convert to 'real' coordinates */
3975        MAT44_VEC( aff_before , aa,bb,cc , uu,vv,ww ) ;
3976      } else {
3977        uu = aa ; vv = bb ; ww = cc ;
3978      }
3979      MAT44_VEC( gam , uu,vv,ww, aa,bb,cc ) ;             /* affine part */
3980 
3981      /* centered and scaled to run from -1..1 */
3982 
3983      uu = (uu-xcen)*xyzfac ; vv = (vv-ycen)*xyzfac ; ww = (ww-zcen)*xyzfac ;
3984      FIXYZ(uu) ; FIXYZ(vv) ; FIXYZ(ww) ;
3985 
3986      /* polynomials */
3987 
3988      if( pall ){
3989        float p1x,p2x,p3x,p4x,p5x,p6x,p7x,p8x,p9x,
3990              p1y,p2y,p3y,p4y,p5y,p6y,p7y,p8y,p9y,
3991              p1z,p2z,p3z,p4z,p5z,p6z,p7z,p8z,p9z ;
3992        p1x=PP1(uu); p2x=PP2(uu); p3x=PP3(uu); p4x=PP4(uu); p5x=PP5(uu); p6x=PP6(uu); p7x=PP7(uu); p8x=PP8(uu); p9x=PP9(uu);
3993        p1y=PP1(vv); p2y=PP2(vv); p3y=PP3(vv); p4y=PP4(vv); p5y=PP5(vv); p6y=PP6(vv); p7y=PP7(vv); p8y=PP8(vv); p9y=PP9(vv);
3994        p1z=PP1(ww); p2z=PP2(ww); p3z=PP3(ww); p4z=PP4(ww); p5z=PP5(ww); p6z=PP6(ww); p7z=PP7(ww); p8z=PP8(ww); p9z=PP9(ww);
3995 
3996 #define Q8_xxxxxxxx p8x
3997 #define Q8_xxxxxxxy p7x*p1y
3998 #define Q8_xxxxxxxz p7x*p1z
3999 #define Q8_xxxxxxyy p6x*p2y
4000 #define Q8_xxxxxxzz p6x*p2z
4001 #define Q8_xxxxxxyz p6x*p1y*p1z
4002 #define Q8_xxxxxyyy p5x*p3y
4003 #define Q8_xxxxxyyz p5x*p2y*p1z
4004 #define Q8_xxxxxyzz p5x*p1y*p2z
4005 #define Q8_xxxxxzzz p5x*p3z
4006 #define Q8_xxxxyyyy p4x*p4y
4007 #define Q8_xxxxyyyz p4x*p3y*p1z
4008 #define Q8_xxxxyyzz p4x*p2y*p2z
4009 #define Q8_xxxxyzzz p4x*p1y*p3z
4010 #define Q8_xxxxzzzz p4x*p4z
4011 #define Q8_xxxyyyyy p3x*p5y
4012 #define Q8_xxxyyyyz p3x*p4y*p1z
4013 #define Q8_xxxyyyzz p3x*p3y*p2z
4014 #define Q8_xxxyyzzz p3x*p2y*p3z
4015 #define Q8_xxxyzzzz p3x*p1y*p4z
4016 #define Q8_xxxzzzzz p3x*p5z
4017 #define Q8_xxyyyyyy p2x*p6y
4018 #define Q8_xxyyyyyz p2x*p5y*p1z
4019 #define Q8_xxyyyyzz p2x*p4y*p2z
4020 #define Q8_xxyyyzzz p2x*p3y*p3z
4021 #define Q8_xxyyzzzz p2x*p2y*p4z
4022 #define Q8_xxyzzzzz p2x*p1y*p5z
4023 #define Q8_xxzzzzzz p2x*p6z
4024 #define Q8_xyyyyyyy p1x*p7y
4025 #define Q8_xyyyyyyz p1x*p6y*p1z
4026 #define Q8_xyyyyyzz p1x*p5y*p2z
4027 #define Q8_xyyyyzzz p1x*p4y*p3z
4028 #define Q8_xyyyzzzz p1x*p3y*p4z
4029 #define Q8_xyyzzzzz p1x*p2y*p5z
4030 #define Q8_xyzzzzzz p1x*p1y*p6z
4031 #define Q8_xzzzzzzz p1x*p7z
4032 #define Q8_yyyyyyyy p8y
4033 #define Q8_yyyyyyyz p7y*p1z
4034 #define Q8_yyyyyyzz p6y*p2z
4035 #define Q8_yyyyyzzz p5y*p3z
4036 #define Q8_yyyyzzzz p4y*p4z
4037 #define Q8_yyyzzzzz p3y*p5z
4038 #define Q8_yyzzzzzz p2y*p6z
4039 #define Q8_yzzzzzzz p1y*p7z
4040 #define Q8_zzzzzzzz p8z
4041 #define Q9_xxxxxxxxx p9x
4042 #define Q9_xxxxxxxxy p8x*p1y
4043 #define Q9_xxxxxxxxz p8x*p1z
4044 #define Q9_xxxxxxxyy p7x*p2y
4045 #define Q9_xxxxxxxzz p7x*p2z
4046 #define Q9_xxxxxxxyz p7x*p1y*p1z
4047 #define Q9_xxxxxxyyy p6x*p3y
4048 #define Q9_xxxxxxyyz p6x*p2y*p1z
4049 #define Q9_xxxxxxyzz p6x*p1y*p2z
4050 #define Q9_xxxxxxzzz p6x*p3z
4051 #define Q9_xxxxxyyyy p5x*p4y
4052 #define Q9_xxxxxyyyz p5x*p3y*p1z
4053 #define Q9_xxxxxyyzz p5x*p2y*p2z
4054 #define Q9_xxxxxyzzz p5x*p1y*p3z
4055 #define Q9_xxxxxzzzz p5x*p4z
4056 #define Q9_xxxxyyyyy p4x*p5y
4057 #define Q9_xxxxyyyyz p4x*p4y*p1z
4058 #define Q9_xxxxyyyzz p4x*p3y*p2z
4059 #define Q9_xxxxyyzzz p4x*p2y*p3z
4060 #define Q9_xxxxyzzzz p4x*p1y*p4z
4061 #define Q9_xxxxzzzzz p4x*p5z
4062 #define Q9_xxxyyyyyy p3x*p6y
4063 #define Q9_xxxyyyyyz p3x*p5y*p1z
4064 #define Q9_xxxyyyyzz p3x*p4y*p2z
4065 #define Q9_xxxyyyzzz p3x*p3y*p3z
4066 #define Q9_xxxyyzzzz p3x*p2y*p4z
4067 #define Q9_xxxyzzzzz p3x*p1y*p5z
4068 #define Q9_xxxzzzzzz p3x*p6z
4069 #define Q9_xxyyyyyyy p2x*p7y
4070 #define Q9_xxyyyyyyz p2x*p6y*p1z
4071 #define Q9_xxyyyyyzz p2x*p5y*p2z
4072 #define Q9_xxyyyyzzz p2x*p4y*p3z
4073 #define Q9_xxyyyzzzz p2x*p3y*p4z
4074 #define Q9_xxyyzzzzz p2x*p2y*p5z
4075 #define Q9_xxyzzzzzz p2x*p1y*p6z
4076 #define Q9_xxzzzzzzz p2x*p7z
4077 #define Q9_xyyyyyyyy p1x*p8y
4078 #define Q9_xyyyyyyyz p1x*p7y*p1z
4079 #define Q9_xyyyyyyzz p1x*p6y*p2z
4080 #define Q9_xyyyyyzzz p1x*p5y*p3z
4081 #define Q9_xyyyyzzzz p1x*p4y*p4z
4082 #define Q9_xyyyzzzzz p1x*p3y*p5z
4083 #define Q9_xyyzzzzzz p1x*p2y*p6z
4084 #define Q9_xyzzzzzzz p1x*p1y*p7z
4085 #define Q9_xzzzzzzzz p1x*p8z
4086 #define Q9_yyyyyyyyy p9y
4087 #define Q9_yyyyyyyyz p8y*p1z
4088 #define Q9_yyyyyyyzz p7y*p2z
4089 #define Q9_yyyyyyzzz p6y*p3z
4090 #define Q9_yyyyyzzzz p5y*p4z
4091 #define Q9_yyyyzzzzz p4y*p5z
4092 #define Q9_yyyzzzzzz p3y*p6z
4093 #define Q9_yyzzzzzzz p2y*p7z
4094 #define Q9_yzzzzzzzz p1y*p8z
4095 #define Q9_zzzzzzzzz p9z
4096 
4097        pv[ 0] = Q2_xx  ; pv[ 1] = Q2_xy  ; pv[ 2] = Q2_xz  ; pv[ 3] = Q2_yy  ;
4098        pv[ 4] = Q2_yz  ; pv[ 5] = Q2_zz  ; pv[ 6] = Q3_xxx ; pv[ 7] = Q3_xxy ;
4099        pv[ 8] = Q3_xxz ; pv[ 9] = Q3_xyy ; pv[10] = Q3_xzz ; pv[11] = Q3_xyz ;
4100        pv[12] = Q3_yyy ; pv[13] = Q3_yyz ; pv[14] = Q3_yzz ; pv[15] = Q3_zzz ;
4101        pv[16] = Q4_xxxx ; pv[17] = Q4_xxxy ; pv[18] = Q4_xxxz ; pv[19] = Q4_xxyy ;
4102        pv[20] = Q4_xxzz ; pv[21] = Q4_xxyz ; pv[22] = Q4_xyyy ; pv[23] = Q4_xyyz ;
4103        pv[24] = Q4_xyzz ; pv[25] = Q4_xzzz ; pv[26] = Q4_yyyy ; pv[27] = Q4_yyyz ;
4104        pv[28] = Q4_yyzz ; pv[29] = Q4_yzzz ; pv[30] = Q4_zzzz ; pv[31] = Q5_xxxxx ;
4105        pv[32] = Q5_xxxxy ; pv[33] = Q5_xxxxz ; pv[34] = Q5_xxxyy ; pv[35] = Q5_xxxzz ;
4106        pv[36] = Q5_xxxyz ; pv[37] = Q5_xxyyy ; pv[38] = Q5_xxyyz ; pv[39] = Q5_xxyzz ;
4107        pv[40] = Q5_xxzzz ; pv[41] = Q5_xyyyy ; pv[42] = Q5_xyyyz ; pv[43] = Q5_xyyzz ;
4108        pv[44] = Q5_xyzzz ; pv[45] = Q5_xzzzz ; pv[46] = Q5_yyyyy ; pv[47] = Q5_yyyyz ;
4109        pv[48] = Q5_yyyzz ; pv[49] = Q5_yyzzz ; pv[50] = Q5_yzzzz ; pv[51] = Q5_zzzzz ;
4110        kk = 52 ;
4111        pv[kk++] = Q6_xxxxxx ; pv[kk++] = Q6_xxxxxy ; pv[kk++] = Q6_xxxxxz ;
4112        pv[kk++] = Q6_xxxxyy ; pv[kk++] = Q6_xxxxzz ; pv[kk++] = Q6_xxxxyz ;
4113        pv[kk++] = Q6_xxxyyy ; pv[kk++] = Q6_xxxyyz ; pv[kk++] = Q6_xxxyzz ;
4114        pv[kk++] = Q6_xxxzzz ; pv[kk++] = Q6_xxyyyy ; pv[kk++] = Q6_xxyyyz ;
4115        pv[kk++] = Q6_xxyyzz ; pv[kk++] = Q6_xxyzzz ; pv[kk++] = Q6_xxzzzz ;
4116        pv[kk++] = Q6_xyyyyy ; pv[kk++] = Q6_xyyyyz ; pv[kk++] = Q6_xyyyzz ;
4117        pv[kk++] = Q6_xyyzzz ; pv[kk++] = Q6_xyzzzz ; pv[kk++] = Q6_xzzzzz ;
4118        pv[kk++] = Q6_yyyyyy ; pv[kk++] = Q6_yyyyyz ; pv[kk++] = Q6_yyyyzz ;
4119        pv[kk++] = Q6_yyyzzz ; pv[kk++] = Q6_yyzzzz ; pv[kk++] = Q6_yzzzzz ;
4120        pv[kk++] = Q6_zzzzzz ;
4121        pv[kk++] = Q7_xxxxxxx ; pv[kk++] = Q7_xxxxxxy ; pv[kk++] = Q7_xxxxxxz ;
4122        pv[kk++] = Q7_xxxxxyy ; pv[kk++] = Q7_xxxxxzz ; pv[kk++] = Q7_xxxxxyz ;
4123        pv[kk++] = Q7_xxxxyyy ; pv[kk++] = Q7_xxxxyyz ; pv[kk++] = Q7_xxxxyzz ;
4124        pv[kk++] = Q7_xxxxzzz ; pv[kk++] = Q7_xxxyyyy ; pv[kk++] = Q7_xxxyyyz ;
4125        pv[kk++] = Q7_xxxyyzz ; pv[kk++] = Q7_xxxyzzz ; pv[kk++] = Q7_xxxzzzz ;
4126        pv[kk++] = Q7_xxyyyyy ; pv[kk++] = Q7_xxyyyyz ; pv[kk++] = Q7_xxyyyzz ;
4127        pv[kk++] = Q7_xxyyzzz ; pv[kk++] = Q7_xxyzzzz ; pv[kk++] = Q7_xxzzzzz ;
4128        pv[kk++] = Q7_xyyyyyy ; pv[kk++] = Q7_xyyyyyz ; pv[kk++] = Q7_xyyyyzz ;
4129        pv[kk++] = Q7_xyyyzzz ; pv[kk++] = Q7_xyyzzzz ; pv[kk++] = Q7_xyzzzzz ;
4130        pv[kk++] = Q7_xzzzzzz ; pv[kk++] = Q7_yyyyyyy ; pv[kk++] = Q7_yyyyyyz ;
4131        pv[kk++] = Q7_yyyyyzz ; pv[kk++] = Q7_yyyyzzz ; pv[kk++] = Q7_yyyzzzz ;
4132        pv[kk++] = Q7_yyzzzzz ; pv[kk++] = Q7_yzzzzzz ; pv[kk++] = Q7_zzzzzzz ;
4133        pv[kk++] = Q8_xxxxxxxx ; pv[kk++] = Q8_xxxxxxxy ; pv[kk++] = Q8_xxxxxxxz ;
4134        pv[kk++] = Q8_xxxxxxyy ; pv[kk++] = Q8_xxxxxxzz ; pv[kk++] = Q8_xxxxxxyz ;
4135        pv[kk++] = Q8_xxxxxyyy ; pv[kk++] = Q8_xxxxxyyz ; pv[kk++] = Q8_xxxxxyzz ;
4136        pv[kk++] = Q8_xxxxxzzz ; pv[kk++] = Q8_xxxxyyyy ; pv[kk++] = Q8_xxxxyyyz ;
4137        pv[kk++] = Q8_xxxxyyzz ; pv[kk++] = Q8_xxxxyzzz ; pv[kk++] = Q8_xxxxzzzz ;
4138        pv[kk++] = Q8_xxxyyyyy ; pv[kk++] = Q8_xxxyyyyz ; pv[kk++] = Q8_xxxyyyzz ;
4139        pv[kk++] = Q8_xxxyyzzz ; pv[kk++] = Q8_xxxyzzzz ; pv[kk++] = Q8_xxxzzzzz ;
4140        pv[kk++] = Q8_xxyyyyyy ; pv[kk++] = Q8_xxyyyyyz ; pv[kk++] = Q8_xxyyyyzz ;
4141        pv[kk++] = Q8_xxyyyzzz ; pv[kk++] = Q8_xxyyzzzz ; pv[kk++] = Q8_xxyzzzzz ;
4142        pv[kk++] = Q8_xxzzzzzz ; pv[kk++] = Q8_xyyyyyyy ; pv[kk++] = Q8_xyyyyyyz ;
4143        pv[kk++] = Q8_xyyyyyzz ; pv[kk++] = Q8_xyyyyzzz ; pv[kk++] = Q8_xyyyzzzz ;
4144        pv[kk++] = Q8_xyyzzzzz ; pv[kk++] = Q8_xyzzzzzz ; pv[kk++] = Q8_xzzzzzzz ;
4145        pv[kk++] = Q8_yyyyyyyy ; pv[kk++] = Q8_yyyyyyyz ; pv[kk++] = Q8_yyyyyyzz ;
4146        pv[kk++] = Q8_yyyyyzzz ; pv[kk++] = Q8_yyyyzzzz ; pv[kk++] = Q8_yyyzzzzz ;
4147        pv[kk++] = Q8_yyzzzzzz ; pv[kk++] = Q8_yzzzzzzz ; pv[kk++] = Q8_zzzzzzzz ;
4148        pv[kk++] = Q9_xxxxxxxxx ; pv[kk++] = Q9_xxxxxxxxy ; pv[kk++] = Q9_xxxxxxxxz ;
4149        pv[kk++] = Q9_xxxxxxxyy ; pv[kk++] = Q9_xxxxxxxzz ; pv[kk++] = Q9_xxxxxxxyz ;
4150        pv[kk++] = Q9_xxxxxxyyy ; pv[kk++] = Q9_xxxxxxyyz ; pv[kk++] = Q9_xxxxxxyzz ;
4151        pv[kk++] = Q9_xxxxxxzzz ; pv[kk++] = Q9_xxxxxyyyy ; pv[kk++] = Q9_xxxxxyyyz ;
4152        pv[kk++] = Q9_xxxxxyyzz ; pv[kk++] = Q9_xxxxxyzzz ; pv[kk++] = Q9_xxxxxzzzz ;
4153        pv[kk++] = Q9_xxxxyyyyy ; pv[kk++] = Q9_xxxxyyyyz ; pv[kk++] = Q9_xxxxyyyzz ;
4154        pv[kk++] = Q9_xxxxyyzzz ; pv[kk++] = Q9_xxxxyzzzz ; pv[kk++] = Q9_xxxxzzzzz ;
4155        pv[kk++] = Q9_xxxyyyyyy ; pv[kk++] = Q9_xxxyyyyyz ; pv[kk++] = Q9_xxxyyyyzz ;
4156        pv[kk++] = Q9_xxxyyyzzz ; pv[kk++] = Q9_xxxyyzzzz ; pv[kk++] = Q9_xxxyzzzzz ;
4157        pv[kk++] = Q9_xxxzzzzzz ; pv[kk++] = Q9_xxyyyyyyy ; pv[kk++] = Q9_xxyyyyyyz ;
4158        pv[kk++] = Q9_xxyyyyyzz ; pv[kk++] = Q9_xxyyyyzzz ; pv[kk++] = Q9_xxyyyzzzz ;
4159        pv[kk++] = Q9_xxyyzzzzz ; pv[kk++] = Q9_xxyzzzzzz ; pv[kk++] = Q9_xxzzzzzzz ;
4160        pv[kk++] = Q9_xyyyyyyyy ; pv[kk++] = Q9_xyyyyyyyz ; pv[kk++] = Q9_xyyyyyyzz ;
4161        pv[kk++] = Q9_xyyyyyzzz ; pv[kk++] = Q9_xyyyyzzzz ; pv[kk++] = Q9_xyyyzzzzz ;
4162        pv[kk++] = Q9_xyyzzzzzz ; pv[kk++] = Q9_xyzzzzzzz ; pv[kk++] = Q9_xzzzzzzzz ;
4163        pv[kk++] = Q9_yyyyyyyyy ; pv[kk++] = Q9_yyyyyyyyz ; pv[kk++] = Q9_yyyyyyyzz ;
4164        pv[kk++] = Q9_yyyyyyzzz ; pv[kk++] = Q9_yyyyyzzzz ; pv[kk++] = Q9_yyyyzzzzz ;
4165        pv[kk++] = Q9_yyyzzzzzz ; pv[kk++] = Q9_yyzzzzzzz ; pv[kk++] = Q9_yzzzzzzzz ;
4166        pv[kk++] = Q9_zzzzzzzzz ;
4167 
4168        for( kk=jj=0 ; jj < NPOLNONI ; jj++,kk+=3 ){
4169          aa += ppar[kk  ] * pv[jj] ;
4170          bb += ppar[kk+1] * pv[jj] ;
4171          cc += ppar[kk+2] * pv[jj] ;
4172        }
4173      } else {
4174        kk = -1 ;
4175        if( puse[++kk] ){ pv[kk] = P2_xx (uu,vv,ww) ; }
4176        if( puse[++kk] ){ pv[kk] = P2_xy (uu,vv,ww) ; }
4177        if( puse[++kk] ){ pv[kk] = P2_xz (uu,vv,ww) ; }
4178        if( puse[++kk] ){ pv[kk] = P2_yy (uu,vv,ww) ; }
4179        if( puse[++kk] ){ pv[kk] = P2_yz (uu,vv,ww) ; }
4180        if( puse[++kk] ){ pv[kk] = P2_zz (uu,vv,ww) ; if( kk >= plast ) goto PUSE_LOOP ; }
4181        if( puse[++kk] ){ pv[kk] = P3_xxx(uu,vv,ww) ; }
4182        if( puse[++kk] ){ pv[kk] = P3_xxy(uu,vv,ww) ; }
4183        if( puse[++kk] ){ pv[kk] = P3_xxz(uu,vv,ww) ; }
4184        if( puse[++kk] ){ pv[kk] = P3_xyy(uu,vv,ww) ; }
4185        if( puse[++kk] ){ pv[kk] = P3_xzz(uu,vv,ww) ; }
4186        if( puse[++kk] ){ pv[kk] = P3_xyz(uu,vv,ww) ; }
4187        if( puse[++kk] ){ pv[kk] = P3_yyy(uu,vv,ww) ; }
4188        if( puse[++kk] ){ pv[kk] = P3_yyz(uu,vv,ww) ; }
4189        if( puse[++kk] ){ pv[kk] = P3_yzz(uu,vv,ww) ; }
4190        if( puse[++kk] ){ pv[kk] = P3_zzz(uu,vv,ww) ; if( kk >= plast ) goto PUSE_LOOP ; }
4191        if( puse[++kk] ){ pv[kk] = P4_xxxx(uu,vv,ww) ; }
4192        if( puse[++kk] ){ pv[kk] = P4_xxxy(uu,vv,ww) ; }
4193        if( puse[++kk] ){ pv[kk] = P4_xxxz(uu,vv,ww) ; }
4194        if( puse[++kk] ){ pv[kk] = P4_xxyy(uu,vv,ww) ; }
4195        if( puse[++kk] ){ pv[kk] = P4_xxzz(uu,vv,ww) ; }
4196        if( puse[++kk] ){ pv[kk] = P4_xxyz(uu,vv,ww) ; }
4197        if( puse[++kk] ){ pv[kk] = P4_xyyy(uu,vv,ww) ; }
4198        if( puse[++kk] ){ pv[kk] = P4_xyyz(uu,vv,ww) ; }
4199        if( puse[++kk] ){ pv[kk] = P4_xyzz(uu,vv,ww) ; }
4200        if( puse[++kk] ){ pv[kk] = P4_xzzz(uu,vv,ww) ; }
4201        if( puse[++kk] ){ pv[kk] = P4_yyyy(uu,vv,ww) ; }
4202        if( puse[++kk] ){ pv[kk] = P4_yyyz(uu,vv,ww) ; }
4203        if( puse[++kk] ){ pv[kk] = P4_yyzz(uu,vv,ww) ; }
4204        if( puse[++kk] ){ pv[kk] = P4_yzzz(uu,vv,ww) ; }
4205        if( puse[++kk] ){ pv[kk] = P4_zzzz(uu,vv,ww) ; if( kk >= plast ) goto PUSE_LOOP ; }
4206        if( puse[++kk] ){ pv[kk] = P5_xxxxx(uu,vv,ww) ; }
4207        if( puse[++kk] ){ pv[kk] = P5_xxxxy(uu,vv,ww) ; }
4208        if( puse[++kk] ){ pv[kk] = P5_xxxxz(uu,vv,ww) ; }
4209        if( puse[++kk] ){ pv[kk] = P5_xxxyy(uu,vv,ww) ; }
4210        if( puse[++kk] ){ pv[kk] = P5_xxxzz(uu,vv,ww) ; }
4211        if( puse[++kk] ){ pv[kk] = P5_xxxyz(uu,vv,ww) ; }
4212        if( puse[++kk] ){ pv[kk] = P5_xxyyy(uu,vv,ww) ; }
4213        if( puse[++kk] ){ pv[kk] = P5_xxyyz(uu,vv,ww) ; }
4214        if( puse[++kk] ){ pv[kk] = P5_xxyzz(uu,vv,ww) ; }
4215        if( puse[++kk] ){ pv[kk] = P5_xxzzz(uu,vv,ww) ; }
4216        if( puse[++kk] ){ pv[kk] = P5_xyyyy(uu,vv,ww) ; }
4217        if( puse[++kk] ){ pv[kk] = P5_xyyyz(uu,vv,ww) ; }
4218        if( puse[++kk] ){ pv[kk] = P5_xyyzz(uu,vv,ww) ; }
4219        if( puse[++kk] ){ pv[kk] = P5_xyzzz(uu,vv,ww) ; }
4220        if( puse[++kk] ){ pv[kk] = P5_xzzzz(uu,vv,ww) ; }
4221        if( puse[++kk] ){ pv[kk] = P5_yyyyy(uu,vv,ww) ; }
4222        if( puse[++kk] ){ pv[kk] = P5_yyyyz(uu,vv,ww) ; }
4223        if( puse[++kk] ){ pv[kk] = P5_yyyzz(uu,vv,ww) ; }
4224        if( puse[++kk] ){ pv[kk] = P5_yyzzz(uu,vv,ww) ; }
4225        if( puse[++kk] ){ pv[kk] = P5_yzzzz(uu,vv,ww) ; }
4226        if( puse[++kk] ){ pv[kk] = P5_zzzzz(uu,vv,ww) ; if( kk >= plast ) goto PUSE_LOOP ; }
4227        if( puse[++kk] ){ pv[kk] = P6_xxxxxx(uu,vv,ww) ; }
4228        if( puse[++kk] ){ pv[kk] = P6_xxxxxy(uu,vv,ww) ; }
4229        if( puse[++kk] ){ pv[kk] = P6_xxxxxz(uu,vv,ww) ; }
4230        if( puse[++kk] ){ pv[kk] = P6_xxxxyy(uu,vv,ww) ; }
4231        if( puse[++kk] ){ pv[kk] = P6_xxxxzz(uu,vv,ww) ; }
4232        if( puse[++kk] ){ pv[kk] = P6_xxxxyz(uu,vv,ww) ; }
4233        if( puse[++kk] ){ pv[kk] = P6_xxxyyy(uu,vv,ww) ; }
4234        if( puse[++kk] ){ pv[kk] = P6_xxxyyz(uu,vv,ww) ; }
4235        if( puse[++kk] ){ pv[kk] = P6_xxxyzz(uu,vv,ww) ; }
4236        if( puse[++kk] ){ pv[kk] = P6_xxxzzz(uu,vv,ww) ; }
4237        if( puse[++kk] ){ pv[kk] = P6_xxyyyy(uu,vv,ww) ; }
4238        if( puse[++kk] ){ pv[kk] = P6_xxyyyz(uu,vv,ww) ; }
4239        if( puse[++kk] ){ pv[kk] = P6_xxyyzz(uu,vv,ww) ; }
4240        if( puse[++kk] ){ pv[kk] = P6_xxyzzz(uu,vv,ww) ; }
4241        if( puse[++kk] ){ pv[kk] = P6_xxzzzz(uu,vv,ww) ; }
4242        if( puse[++kk] ){ pv[kk] = P6_xyyyyy(uu,vv,ww) ; }
4243        if( puse[++kk] ){ pv[kk] = P6_xyyyyz(uu,vv,ww) ; }
4244        if( puse[++kk] ){ pv[kk] = P6_xyyyzz(uu,vv,ww) ; }
4245        if( puse[++kk] ){ pv[kk] = P6_xyyzzz(uu,vv,ww) ; }
4246        if( puse[++kk] ){ pv[kk] = P6_xyzzzz(uu,vv,ww) ; }
4247        if( puse[++kk] ){ pv[kk] = P6_xzzzzz(uu,vv,ww) ; }
4248        if( puse[++kk] ){ pv[kk] = P6_yyyyyy(uu,vv,ww) ; }
4249        if( puse[++kk] ){ pv[kk] = P6_yyyyyz(uu,vv,ww) ; }
4250        if( puse[++kk] ){ pv[kk] = P6_yyyyzz(uu,vv,ww) ; }
4251        if( puse[++kk] ){ pv[kk] = P6_yyyzzz(uu,vv,ww) ; }
4252        if( puse[++kk] ){ pv[kk] = P6_yyzzzz(uu,vv,ww) ; }
4253        if( puse[++kk] ){ pv[kk] = P6_yzzzzz(uu,vv,ww) ; }
4254        if( puse[++kk] ){ pv[kk] = P6_zzzzzz(uu,vv,ww) ; if( kk >= plast ) goto PUSE_LOOP ; }
4255        if( puse[++kk] ){ pv[kk] = P7_xxxxxxx(uu,vv,ww) ; }
4256        if( puse[++kk] ){ pv[kk] = P7_xxxxxxy(uu,vv,ww) ; }
4257        if( puse[++kk] ){ pv[kk] = P7_xxxxxxz(uu,vv,ww) ; }
4258        if( puse[++kk] ){ pv[kk] = P7_xxxxxyy(uu,vv,ww) ; }
4259        if( puse[++kk] ){ pv[kk] = P7_xxxxxzz(uu,vv,ww) ; }
4260        if( puse[++kk] ){ pv[kk] = P7_xxxxxyz(uu,vv,ww) ; }
4261        if( puse[++kk] ){ pv[kk] = P7_xxxxyyy(uu,vv,ww) ; }
4262        if( puse[++kk] ){ pv[kk] = P7_xxxxyyz(uu,vv,ww) ; }
4263        if( puse[++kk] ){ pv[kk] = P7_xxxxyzz(uu,vv,ww) ; }
4264        if( puse[++kk] ){ pv[kk] = P7_xxxxzzz(uu,vv,ww) ; }
4265        if( puse[++kk] ){ pv[kk] = P7_xxxyyyy(uu,vv,ww) ; }
4266        if( puse[++kk] ){ pv[kk] = P7_xxxyyyz(uu,vv,ww) ; }
4267        if( puse[++kk] ){ pv[kk] = P7_xxxyyzz(uu,vv,ww) ; }
4268        if( puse[++kk] ){ pv[kk] = P7_xxxyzzz(uu,vv,ww) ; }
4269        if( puse[++kk] ){ pv[kk] = P7_xxxzzzz(uu,vv,ww) ; }
4270        if( puse[++kk] ){ pv[kk] = P7_xxyyyyy(uu,vv,ww) ; }
4271        if( puse[++kk] ){ pv[kk] = P7_xxyyyyz(uu,vv,ww) ; }
4272        if( puse[++kk] ){ pv[kk] = P7_xxyyyzz(uu,vv,ww) ; }
4273        if( puse[++kk] ){ pv[kk] = P7_xxyyzzz(uu,vv,ww) ; }
4274        if( puse[++kk] ){ pv[kk] = P7_xxyzzzz(uu,vv,ww) ; }
4275        if( puse[++kk] ){ pv[kk] = P7_xxzzzzz(uu,vv,ww) ; }
4276        if( puse[++kk] ){ pv[kk] = P7_xyyyyyy(uu,vv,ww) ; }
4277        if( puse[++kk] ){ pv[kk] = P7_xyyyyyz(uu,vv,ww) ; }
4278        if( puse[++kk] ){ pv[kk] = P7_xyyyyzz(uu,vv,ww) ; }
4279        if( puse[++kk] ){ pv[kk] = P7_xyyyzzz(uu,vv,ww) ; }
4280        if( puse[++kk] ){ pv[kk] = P7_xyyzzzz(uu,vv,ww) ; }
4281        if( puse[++kk] ){ pv[kk] = P7_xyzzzzz(uu,vv,ww) ; }
4282        if( puse[++kk] ){ pv[kk] = P7_xzzzzzz(uu,vv,ww) ; }
4283        if( puse[++kk] ){ pv[kk] = P7_yyyyyyy(uu,vv,ww) ; }
4284        if( puse[++kk] ){ pv[kk] = P7_yyyyyyz(uu,vv,ww) ; }
4285        if( puse[++kk] ){ pv[kk] = P7_yyyyyzz(uu,vv,ww) ; }
4286        if( puse[++kk] ){ pv[kk] = P7_yyyyzzz(uu,vv,ww) ; }
4287        if( puse[++kk] ){ pv[kk] = P7_yyyzzzz(uu,vv,ww) ; }
4288        if( puse[++kk] ){ pv[kk] = P7_yyzzzzz(uu,vv,ww) ; }
4289        if( puse[++kk] ){ pv[kk] = P7_yzzzzzz(uu,vv,ww) ; }
4290        if( puse[++kk] ){ pv[kk] = P7_zzzzzzz(uu,vv,ww) ; if( kk >= plast ) goto PUSE_LOOP ; }
4291        if( puse[++kk] ){ pv[kk] = P8_xxxxxxxx(uu,vv,ww) ; }
4292        if( puse[++kk] ){ pv[kk] = P8_xxxxxxxy(uu,vv,ww) ; }
4293        if( puse[++kk] ){ pv[kk] = P8_xxxxxxxz(uu,vv,ww) ; }
4294        if( puse[++kk] ){ pv[kk] = P8_xxxxxxyy(uu,vv,ww) ; }
4295        if( puse[++kk] ){ pv[kk] = P8_xxxxxxzz(uu,vv,ww) ; }
4296        if( puse[++kk] ){ pv[kk] = P8_xxxxxxyz(uu,vv,ww) ; }
4297        if( puse[++kk] ){ pv[kk] = P8_xxxxxyyy(uu,vv,ww) ; }
4298        if( puse[++kk] ){ pv[kk] = P8_xxxxxyyz(uu,vv,ww) ; }
4299        if( puse[++kk] ){ pv[kk] = P8_xxxxxyzz(uu,vv,ww) ; }
4300        if( puse[++kk] ){ pv[kk] = P8_xxxxxzzz(uu,vv,ww) ; }
4301        if( puse[++kk] ){ pv[kk] = P8_xxxxyyyy(uu,vv,ww) ; }
4302        if( puse[++kk] ){ pv[kk] = P8_xxxxyyyz(uu,vv,ww) ; }
4303        if( puse[++kk] ){ pv[kk] = P8_xxxxyyzz(uu,vv,ww) ; }
4304        if( puse[++kk] ){ pv[kk] = P8_xxxxyzzz(uu,vv,ww) ; }
4305        if( puse[++kk] ){ pv[kk] = P8_xxxxzzzz(uu,vv,ww) ; }
4306        if( puse[++kk] ){ pv[kk] = P8_xxxyyyyy(uu,vv,ww) ; }
4307        if( puse[++kk] ){ pv[kk] = P8_xxxyyyyz(uu,vv,ww) ; }
4308        if( puse[++kk] ){ pv[kk] = P8_xxxyyyzz(uu,vv,ww) ; }
4309        if( puse[++kk] ){ pv[kk] = P8_xxxyyzzz(uu,vv,ww) ; }
4310        if( puse[++kk] ){ pv[kk] = P8_xxxyzzzz(uu,vv,ww) ; }
4311        if( puse[++kk] ){ pv[kk] = P8_xxxzzzzz(uu,vv,ww) ; }
4312        if( puse[++kk] ){ pv[kk] = P8_xxyyyyyy(uu,vv,ww) ; }
4313        if( puse[++kk] ){ pv[kk] = P8_xxyyyyyz(uu,vv,ww) ; }
4314        if( puse[++kk] ){ pv[kk] = P8_xxyyyyzz(uu,vv,ww) ; }
4315        if( puse[++kk] ){ pv[kk] = P8_xxyyyzzz(uu,vv,ww) ; }
4316        if( puse[++kk] ){ pv[kk] = P8_xxyyzzzz(uu,vv,ww) ; }
4317        if( puse[++kk] ){ pv[kk] = P8_xxyzzzzz(uu,vv,ww) ; }
4318        if( puse[++kk] ){ pv[kk] = P8_xxzzzzzz(uu,vv,ww) ; }
4319        if( puse[++kk] ){ pv[kk] = P8_xyyyyyyy(uu,vv,ww) ; }
4320        if( puse[++kk] ){ pv[kk] = P8_xyyyyyyz(uu,vv,ww) ; }
4321        if( puse[++kk] ){ pv[kk] = P8_xyyyyyzz(uu,vv,ww) ; }
4322        if( puse[++kk] ){ pv[kk] = P8_xyyyyzzz(uu,vv,ww) ; }
4323        if( puse[++kk] ){ pv[kk] = P8_xyyyzzzz(uu,vv,ww) ; }
4324        if( puse[++kk] ){ pv[kk] = P8_xyyzzzzz(uu,vv,ww) ; }
4325        if( puse[++kk] ){ pv[kk] = P8_xyzzzzzz(uu,vv,ww) ; }
4326        if( puse[++kk] ){ pv[kk] = P8_xzzzzzzz(uu,vv,ww) ; }
4327        if( puse[++kk] ){ pv[kk] = P8_yyyyyyyy(uu,vv,ww) ; }
4328        if( puse[++kk] ){ pv[kk] = P8_yyyyyyyz(uu,vv,ww) ; }
4329        if( puse[++kk] ){ pv[kk] = P8_yyyyyyzz(uu,vv,ww) ; }
4330        if( puse[++kk] ){ pv[kk] = P8_yyyyyzzz(uu,vv,ww) ; }
4331        if( puse[++kk] ){ pv[kk] = P8_yyyyzzzz(uu,vv,ww) ; }
4332        if( puse[++kk] ){ pv[kk] = P8_yyyzzzzz(uu,vv,ww) ; }
4333        if( puse[++kk] ){ pv[kk] = P8_yyzzzzzz(uu,vv,ww) ; }
4334        if( puse[++kk] ){ pv[kk] = P8_yzzzzzzz(uu,vv,ww) ; }
4335        if( puse[++kk] ){ pv[kk] = P8_zzzzzzzz(uu,vv,ww) ; if( kk >= plast ) goto PUSE_LOOP ; }
4336        if( puse[++kk] ){ pv[kk] = P9_xxxxxxxxx(uu,vv,ww) ; }
4337        if( puse[++kk] ){ pv[kk] = P9_xxxxxxxxy(uu,vv,ww) ; }
4338        if( puse[++kk] ){ pv[kk] = P9_xxxxxxxxz(uu,vv,ww) ; }
4339        if( puse[++kk] ){ pv[kk] = P9_xxxxxxxyy(uu,vv,ww) ; }
4340        if( puse[++kk] ){ pv[kk] = P9_xxxxxxxzz(uu,vv,ww) ; }
4341        if( puse[++kk] ){ pv[kk] = P9_xxxxxxxyz(uu,vv,ww) ; }
4342        if( puse[++kk] ){ pv[kk] = P9_xxxxxxyyy(uu,vv,ww) ; }
4343        if( puse[++kk] ){ pv[kk] = P9_xxxxxxyyz(uu,vv,ww) ; }
4344        if( puse[++kk] ){ pv[kk] = P9_xxxxxxyzz(uu,vv,ww) ; }
4345        if( puse[++kk] ){ pv[kk] = P9_xxxxxxzzz(uu,vv,ww) ; }
4346        if( puse[++kk] ){ pv[kk] = P9_xxxxxyyyy(uu,vv,ww) ; }
4347        if( puse[++kk] ){ pv[kk] = P9_xxxxxyyyz(uu,vv,ww) ; }
4348        if( puse[++kk] ){ pv[kk] = P9_xxxxxyyzz(uu,vv,ww) ; }
4349        if( puse[++kk] ){ pv[kk] = P9_xxxxxyzzz(uu,vv,ww) ; }
4350        if( puse[++kk] ){ pv[kk] = P9_xxxxxzzzz(uu,vv,ww) ; }
4351        if( puse[++kk] ){ pv[kk] = P9_xxxxyyyyy(uu,vv,ww) ; }
4352        if( puse[++kk] ){ pv[kk] = P9_xxxxyyyyz(uu,vv,ww) ; }
4353        if( puse[++kk] ){ pv[kk] = P9_xxxxyyyzz(uu,vv,ww) ; }
4354        if( puse[++kk] ){ pv[kk] = P9_xxxxyyzzz(uu,vv,ww) ; }
4355        if( puse[++kk] ){ pv[kk] = P9_xxxxyzzzz(uu,vv,ww) ; }
4356        if( puse[++kk] ){ pv[kk] = P9_xxxxzzzzz(uu,vv,ww) ; }
4357        if( puse[++kk] ){ pv[kk] = P9_xxxyyyyyy(uu,vv,ww) ; }
4358        if( puse[++kk] ){ pv[kk] = P9_xxxyyyyyz(uu,vv,ww) ; }
4359        if( puse[++kk] ){ pv[kk] = P9_xxxyyyyzz(uu,vv,ww) ; }
4360        if( puse[++kk] ){ pv[kk] = P9_xxxyyyzzz(uu,vv,ww) ; }
4361        if( puse[++kk] ){ pv[kk] = P9_xxxyyzzzz(uu,vv,ww) ; }
4362        if( puse[++kk] ){ pv[kk] = P9_xxxyzzzzz(uu,vv,ww) ; }
4363        if( puse[++kk] ){ pv[kk] = P9_xxxzzzzzz(uu,vv,ww) ; }
4364        if( puse[++kk] ){ pv[kk] = P9_xxyyyyyyy(uu,vv,ww) ; }
4365        if( puse[++kk] ){ pv[kk] = P9_xxyyyyyyz(uu,vv,ww) ; }
4366        if( puse[++kk] ){ pv[kk] = P9_xxyyyyyzz(uu,vv,ww) ; }
4367        if( puse[++kk] ){ pv[kk] = P9_xxyyyyzzz(uu,vv,ww) ; }
4368        if( puse[++kk] ){ pv[kk] = P9_xxyyyzzzz(uu,vv,ww) ; }
4369        if( puse[++kk] ){ pv[kk] = P9_xxyyzzzzz(uu,vv,ww) ; }
4370        if( puse[++kk] ){ pv[kk] = P9_xxyzzzzzz(uu,vv,ww) ; }
4371        if( puse[++kk] ){ pv[kk] = P9_xxzzzzzzz(uu,vv,ww) ; }
4372        if( puse[++kk] ){ pv[kk] = P9_xyyyyyyyy(uu,vv,ww) ; }
4373        if( puse[++kk] ){ pv[kk] = P9_xyyyyyyyz(uu,vv,ww) ; }
4374        if( puse[++kk] ){ pv[kk] = P9_xyyyyyyzz(uu,vv,ww) ; }
4375        if( puse[++kk] ){ pv[kk] = P9_xyyyyyzzz(uu,vv,ww) ; }
4376        if( puse[++kk] ){ pv[kk] = P9_xyyyyzzzz(uu,vv,ww) ; }
4377        if( puse[++kk] ){ pv[kk] = P9_xyyyzzzzz(uu,vv,ww) ; }
4378        if( puse[++kk] ){ pv[kk] = P9_xyyzzzzzz(uu,vv,ww) ; }
4379        if( puse[++kk] ){ pv[kk] = P9_xyzzzzzzz(uu,vv,ww) ; }
4380        if( puse[++kk] ){ pv[kk] = P9_xzzzzzzzz(uu,vv,ww) ; }
4381        if( puse[++kk] ){ pv[kk] = P9_yyyyyyyyy(uu,vv,ww) ; }
4382        if( puse[++kk] ){ pv[kk] = P9_yyyyyyyyz(uu,vv,ww) ; }
4383        if( puse[++kk] ){ pv[kk] = P9_yyyyyyyzz(uu,vv,ww) ; }
4384        if( puse[++kk] ){ pv[kk] = P9_yyyyyyzzz(uu,vv,ww) ; }
4385        if( puse[++kk] ){ pv[kk] = P9_yyyyyzzzz(uu,vv,ww) ; }
4386        if( puse[++kk] ){ pv[kk] = P9_yyyyzzzzz(uu,vv,ww) ; }
4387        if( puse[++kk] ){ pv[kk] = P9_yyyzzzzzz(uu,vv,ww) ; }
4388        if( puse[++kk] ){ pv[kk] = P9_yyzzzzzzz(uu,vv,ww) ; }
4389        if( puse[++kk] ){ pv[kk] = P9_yzzzzzzzz(uu,vv,ww) ; }
4390        if( puse[++kk] ){ pv[kk] = P9_zzzzzzzzz(uu,vv,ww) ; }
4391 
4392 PUSE_LOOP:
4393        for( kk=jj=0 ; jj <= plast ; jj++,kk+=3 ){
4394          if( puse[jj] ){
4395            aa += ppar[kk  ] * pv[jj] ;
4396            bb += ppar[kk+1] * pv[jj] ;
4397            cc += ppar[kk+2] * pv[jj] ;
4398          }
4399        }
4400      }
4401 
4402      if( aff_use_after ){                    /* convert back to indexes */
4403        MAT44_VEC( aff_after , aa,bb,cc , xo[ii],yo[ii],zo[ii] ) ;
4404      } else {
4405        xo[ii] = aa ; yo[ii] = bb ; zo[ii] = cc ;
4406      }
4407 
4408    } /* end of loop over input points */
4409  }
4410  AFNI_OMP_END ;
4411 
4412 #if 0
4413  if( AFNI_yesenv("ALLIN_DEBUG") ){
4414    int ii ; float dd,dmax=0.0f ;
4415    static int ncall=0 ;
4416    for( ii=0 ; ii < npt ; ii++ ){
4417      dd = MYfabsf(xo[ii]-xi[ii]) + MYfabsf(yo[ii]-yi[ii]) + MYfabsf(zo[ii]-zi[ii]) ;
4418      if( dd > dmax ) dmax = dd ;
4419    }
4420    ncall++ ; ININFO_message("nonic %d: dmax = %.4g",ncall,dmax) ;
4421  }
4422 #endif
4423 
4424    EXRETURN ;
4425 }
4426 #endif /* ALLOW_NWARP */ /**************************************************/
4427 
4428 #if 0
4429 /****************************************************************************/
4430 /************** Nonlinear sum of basis fields stored in arrays **************/
4431 
/* State for the (disabled) warp-sum transformation:
   a warp is the initial displacement field plus a weighted
   sum of wsum_npar basis fields, one weight per parameter. */

static int wsum_npar = 0 ;  /* number of parameters == number of basis fields */

static int wsum_nx=0 , wsum_ny=0 , wsum_nz=0 , wsum_nxy=0 , wsum_nxyz=0 ; /* grid dims & products */
static float wsum_dx , wsum_dy , wsum_dz ;  /* grid spacings */

static float  *wsum_xinit  = NULL ;  /* initial x displacement per voxel */
static float  *wsum_yinit  = NULL ;  /* initial y displacement per voxel */
static float  *wsum_zinit  = NULL ;  /* initial z displacement per voxel */
static float **wsum_xbasis = NULL ;  /* wsum_npar x-basis fields */
static float **wsum_ybasis = NULL ;  /* wsum_npar y-basis fields */
static float **wsum_zbasis = NULL ;  /* wsum_npar z-basis fields */
static float  *wsum_param  = NULL ;  /* current parameter (weight) vector */
4444 
4445 /*--------------------------------------------------------------------------*/
4446 
4447 void mri_genalign_warpsum_set_dimen( int   nx, int   ny, int   nz,
4448                                      float dx, float dy, float dz )
4449 {
4450    int ii ;
4451 
4452    if( nx < 1 ){ nx = ny = nz = 0 ; }
4453 
4454    wsum_nx = nx ;
4455    wsum_ny = ny ; wsum_nxy  = nx*ny ;
4456    wsum_nz = nz ; wsum_nxyz = wsum_nx * nz ;
4457    wsum_dx = dx ; wsum_dy = dy ; wsum_dz = dz ;
4458 
4459    IFREE(wsum_xinit); IFREE(wsum_yinit); IFREE(wsum_zinit); IFREE(wsum_param);
4460 
4461    if( wsum_xbasis != NULL ){
4462      for( ii=0 ; ii < wsum_npar ; ii++ ){
4463        IFREE(wsum_xbasis[ii]); IFREE(wsum_ybasis[ii]); IFREE(wsum_zbasis[ii]);
4464      }
4465      IFREE(wsum_xbasis); IFREE(wsum_ybasis); IFREE(wsum_zbasis);
4466    }
4467    wsum_npar = 0 ;
4468 
4469    return ;
4470 }
4471 
4472 /*--------------------------------------------------------------------------*/
4473 /*! A wfunc function for nonlinear transformations. */
4474 /*--------------------------------------------------------------------------*/
4475 
4476 void mri_genalign_warpsum( int npar, float *wpar ,
4477                            int npt , float *xi, float *yi, float *zi ,
4478                                      float *xo, float *yo, float *zo  )
4479 {
4480    register int ii,jj , pp,qq,rr , pqr ;
4481    register float xv,yv,zv , *wp ;
4482 
4483    /** new parameters **/
4484 
4485    if( npar > 0 && wpar != NULL ){
4486      memcpy( wsum_param , wpar , sizeof(float)*npar ) ;
4487    }
4488    wp = wsum_param ;
4489 
4490    /* nothing to transform? */
4491 
4492    if( npt <= 0 || xi == NULL || xo == NULL ) return ;
4493 
4494    /* multiply matrix times input vectors */
4495 
4496    for( ii=0 ; ii < npt ; ii++ ){
4497      pp  = (int)xi[ii] ; qq = (int)yi[ii] ; rr = (int)zi[ii] ;
4498      pqr = pp + qq*wsum_nx + rr*wsum_nxy ;
4499      xv  = wsum_xinit[pqr] ; yv  = wsum_yinit[pqr] ; zv  = wsum_zinit[pqr] ;
4500      for( jj=0 ; jj < wsum_npar ; jj++ ){
4501        xv += wp[jj] * wsum_xbasis[jj][pqr] ;
4502        yv += wp[jj] * wsum_ybasis[jj][pqr] ;
4503        zv += wp[jj] * wsum_zbasis[jj][pqr] ;
4504      }
4505      xo[ii] = xv ; yo[ii] = yv ; zo[ii] = zv ;
4506    }
4507 
4508    return ;
4509 }
4510 #endif
4511 
4512 /*----------------------------------------------------------------------------*/
4513 /* Note that the setup call to wfunc() must have been made before this call. */
4514 
GA_get_warped_overlap_fraction(void)4515 float GA_get_warped_overlap_fraction(void)
4516 {
4517    int    npar , ii,jj,kk,qq,pp,nqq,mm,nx,nxy , nxt,nxyt , npt,nhit ;
4518    float *imf, *jmf, *kmf, *imw, *jmw, *kmw , xx,yy,zz,nxh,nyh,nzh , frac ;
4519    byte *bsar, *tgar ;
4520 #ifdef USE_OMP
4521    byte *hhh ;
4522 #endif
4523 
4524 ENTRY("GA_get_warped_overlap") ;
4525 
4526    if( gstup->bsmask == NULL || gstup->ajmask ==NULL ) RETURN(1.0f) ;
4527    bsar = MRI_BYTE_PTR(gstup->bsmask) ;
4528    tgar = MRI_BYTE_PTR(gstup->ajmask) ;
4529 
4530    npar = gstup->wfunc_numpar ;
4531    npt  = gstup->bsmask->nvox ;  /* number of total voxels in base */
4532    nqq  = gstup->nbsmask ;       /* number of mask voxels in base */
4533 
4534    nx = gstup->bsmask->nx; nxy = nx * gstup->bsmask->ny;
4535 
4536    nxt = gstup->ajmask->nx ; nxyt = nxt * gstup->ajmask->ny ;
4537    nxh = nxt-0.501f ; nyh = gstup->ajmask->ny-0.501f ; nzh = gstup->ajmask->nz-0.501f ;
4538 
4539 #if 0
4540    /* send parameters to warping function for its setup */
4541 
4542    gstup->wfunc( npar , wpar , 0,NULL,NULL,NULL , NULL,NULL,NULL ) ;
4543 #endif
4544 
4545    /*--- load 3D indexes to transform from base to target ---*/
4546 
4547    imf = (float *)malloc(sizeof(float)*nqq) ;
4548    jmf = (float *)malloc(sizeof(float)*nqq) ;
4549    kmf = (float *)malloc(sizeof(float)*nqq) ;
4550 
4551    for( pp=qq=0 ; pp < npt ; pp++ ){
4552      if( bsar[pp] ){
4553        ii = pp % nx ; kk = pp / nxy ; jj = (pp-kk*nxy) / nx ;
4554        imf[qq] = (float)ii ; jmf[qq] = (float)jj ; kmf[qq] = (float)kk; qq++ ;
4555      }
4556    }
4557 
4558    /*--- warp to new locations ---*/
4559 
4560    imw = (float *)malloc(sizeof(float)*nqq) ;
4561    jmw = (float *)malloc(sizeof(float)*nqq) ;
4562    kmw = (float *)malloc(sizeof(float)*nqq) ;
4563 
4564    gstup->wfunc( npar , NULL , nqq , imf,jmf,kmf , imw,jmw,kmw ) ;
4565 
4566    free(kmf); free(jmf); free(imf);
4567 
4568    /* check target mask at warped points */
4569 
4570 #ifndef USE_OMP
4571    for( nhit=qq=0 ; qq < nqq ; qq++ ){
4572      xx = imw[qq] ; if( xx < -0.499f || xx > nxh ) continue ;
4573      yy = jmw[qq] ; if( yy < -0.499f || yy > nyh ) continue ;
4574      zz = kmw[qq] ; if( zz < -0.499f || zz > nzh ) continue ;
4575      ii = (int)(xx+0.5f) ; jj = (int)(yy+0.5f) ; kk = (int)(zz+0.5f) ;
4576      if( tgar[ii+jj*nxt+kk*nxyt] ) nhit++ ;
4577    }
4578 #else
4579    hhh = (byte *)calloc(sizeof(byte),nqq) ;
4580  AFNI_OMP_START ;
4581 #pragma omp parallel if( nqq > 33333 )
4582  { int ii,jj,kk,qq ; float xx,yy,zz ;
4583 #pragma omp for
4584    for( qq=0 ; qq < nqq ; qq++ ){
4585      xx = imw[qq] ; if( xx < -0.499f || xx > nxh ) continue ;
4586      yy = jmw[qq] ; if( yy < -0.499f || yy > nyh ) continue ;
4587      zz = kmw[qq] ; if( zz < -0.499f || zz > nzh ) continue ;
4588      ii = (int)(xx+0.5f) ; jj = (int)(yy+0.5f) ; kk = (int)(zz+0.5f) ;
4589      if( tgar[ii+jj*nxt+kk*nxyt] ) hhh[qq] = 1 ;
4590    }
4591  }
4592  AFNI_OMP_END ;
4593  for( nhit=qq=0 ; qq < nqq ; qq++ ) nhit += hhh[qq] ;
4594  free(hhh) ;
4595 #endif
4596 
4597    free((void *)kmw); free((void *)jmw); free((void *)imw);
4598 
4599    xx = gstup->nbsmask ;
4600    yy = gstup->najmask * gstup->ajim->dx * gstup->ajim->dy * gstup->ajim->dz /
4601                        ( gstup->bsim->dx * gstup->bsim->dy * gstup->bsim->dz ) ;
4602 
4603    frac = nhit / MIN(xx,yy) ; RETURN(frac) ;
4604 }
4605