1 #include "mrilib.h"
2
3 #include "mri_pvmap.c"
4
5 /*** Functions for doing 4D image to grayplot conversion ***/
6
7 /*--------------------------------------------------------------------------*/
8
9 /*----- levels = classes of voxels that are grouped together -----*/
10
11 static int lev_num ;
12 static int lev_siz[256] ;
13
14 /*----- ordering of voxel time series vertically, within each level -----*/
15
16 #define ORDER_IJK 0
17 #define ORDER_PV 1
18 #define ORDER_PEEL 2
19 #define ORDER_LJ 3
20
21 static int ordering = 0 ;
22
grayplot_order_by_pvmap(int yesno)23 static void grayplot_order_by_pvmap( int yesno ){
24 ordering = (yesno) ? ORDER_PV : 0 ;
25 }
grayplot_order_by_peels(int yesno)26 static void grayplot_order_by_peels( int yesno ){
27 ordering = (yesno) ? ORDER_PEEL : 0 ;
28 }
grayplot_order_by_ijk(int yesno)29 static void grayplot_order_by_ijk( int yesno ){
30 ordering = 0 ;
31 }
grayplot_order_by_lj(int yesno)32 static void grayplot_order_by_lj( int yesno ){ /* 05 Feb 2020 */
33 ordering = (yesno) ? ORDER_LJ : 0 ;
34 }
35
grayplot_is_ordered_by_peels(void)36 static int grayplot_is_ordered_by_peels( void ){ /* [18 Jun 2019 rickr] */
37 return (ordering == ORDER_PEEL);
38 }
grayplot_is_ordered_by_pvmap(void)39 static int grayplot_is_ordered_by_pvmap( void ){
40 return (ordering == ORDER_PV);
41 }
grayplot_is_ordered_by_lj(void)42 static int grayplot_is_ordered_by_lj( void ){ /* 05 Feb 2020 */
43 return (ordering == ORDER_LJ);
44 }
45
46 /*----- how time series are normalized [30 Jul 2018] -----*/
47
48 static int norming = 0 ;
49
50 #define NORM_RMS 0
51 #define NORM_NONE 1
52 #define NORM_MAXABS 2
53
grayplot_norming_RMS(void)54 static void grayplot_norming_RMS(void){
55 norming = NORM_RMS ;
56 }
57
grayplot_norming_none(void)58 static void grayplot_norming_none(void){
59 norming = NORM_NONE ;
60 }
61
grayplot_norming_maxabs(void)62 static void grayplot_norming_maxabs(void){
63 norming = NORM_MAXABS ;
64 }
65
66 /*----- grayplot range for 1 unit of data -----*/
67
68 static float grange = 0.0f ;
69
grayplot_set_range(float val)70 static void grayplot_set_range( float val ){
71 grange = MAX(val,0.0f) ;
72 }
73
74 /*----- grayplot raw_with_bounds range -----*/
75
76 static float raw_range_bot = 1.0f ;
77 static float raw_range_top = -1.0f ;
78 static int DO_RAW_RANGE = 0;
79
grayplot_set_raw_range(float bnd_bot,float bnd_top)80 static void grayplot_set_raw_range( float bnd_bot, float bnd_top){
81 raw_range_bot = bnd_bot;
82 raw_range_top = bnd_top;
83 DO_RAW_RANGE = 1;
84 }
85
86 /*----- percentage? -----*/
87
88 static int do_percent = 0 ;
grayplot_set_percent(void)89 static void grayplot_set_percent(void){ do_percent = 1 ; }
90
91 /*----- use old resampling method? -----*/
92
93 static int use_old_resam = 0 ;
grayplot_set_use_old_resam(void)94 static void grayplot_set_use_old_resam(void){ use_old_resam = 1 ; }
95
96 /*--------------------------------------------------------------------------*/
97 /* Convert to a vectim, using mmask to pick out the levels
98 of different voxels to keep together (e.g., GM, WM, CSF)
99 *//*------------------------------------------------------------------------*/
100
/* Extract the masked voxels of dset into one MRI_vectim, grouped by mmask
   'level' (each distinct nonzero byte value in mmask = one tissue class).
   Within each level: optionally detrend (polort >= 0), optionally convert
   to percent of baseline, normalize per the file-static 'norming' mode,
   optionally blur (fwhm > 0), and spatially re-order rows per 'ordering'.
   Side effects: sets file-statics lev_num and lev_siz[] (voxel count per
   kept level, used later to draw separator lines) and unloads dset.
   Returns NULL on implausible input: invalid dset, NULL mask, fewer than
   19 time points, fewer than 19 mask voxels, or no level with > 9 voxels. */
static MRI_vectim * THD_dset_grayplot_prep( THD_3dim_dataset *dset ,
                                            byte *mmask ,
                                            int polort , float fwhm )
{
   Aint cmval , nmask , nxyz , nts ;
   byte mm,*tmask ; Aint ii,jj,kk ;
   Aint nvim ; MRI_vectim **vim , *vout ;
   float *tsar , *fit=NULL , fac ;

   /* check inputs for plausibility */

   ENTRY("THD_dset_grayplot_prep") ;

   lev_num = 0 ;
   if( !ISVALID_DSET(dset) || mmask == NULL ) RETURN(NULL) ;
   nts = DSET_NVALS(dset) ;
   if( nts < 19 ) RETURN(NULL) ;     /* too few time points to be interesting */

   nxyz = DSET_NVOX(dset) ;
   nmask = THD_countmask( nxyz , mmask ) ;
   if( nmask < 19 ) RETURN(NULL) ;   /* too few voxels in the mask */

   /* percent scaling needs the baseline (fit[0]), so force detrending on */
   if( do_percent && polort < 0 ) polort = 0 ;

   /* find voxels in each mmask 'level' */

   nvim = 0 ; vim = NULL ;
   tmask = (byte *)malloc(sizeof(byte)*nxyz) ; /* binary mask */

   if( polort >= 0 ){
     fit = (float *)malloc(sizeof(float)*(polort+1)) ; /* setup polorts */
   }

   for( ii=1 ; ii <= 255 ; ii++ ){ /* loop over mmask levels */

     /* create binary mask for this level; cmval = voxel count at level ii */
     mm = (byte)ii ;
     memset( tmask , 0 , sizeof(byte)*nxyz ) ;
     for( cmval=jj=0 ; jj < nxyz ; jj++ ){
       if( mmask[jj] == mm ){ cmval++ ; tmask[jj] = 1 ; }
     }

     /* extract voxels at this level into a vectim
        (levels with <= 9 voxels are silently skipped) */
     if( cmval > 9 ){
       vim = (MRI_vectim **)realloc( vim , sizeof(MRI_vectim *)*(nvim+1) ) ;
       vim[nvim] = THD_dset_to_vectim( dset , tmask , 0 ) ;
       if( polort >= 0 ){
#if 1
         ININFO_message(" Detrending time series with polort=%d",polort) ;
#endif
         for( jj=0 ; jj < vim[nvim]->nvec ; jj++ ){ /* detrend */
           tsar = VECTIM_PTR( vim[nvim] , jj ) ; fit[0] = 0.0f ;
           THD_generic_detrend_LSQ( nts,tsar , polort , 0,NULL,fit ) ;
           /* fit[0] is the constant (baseline) term of the fit */
           if( do_percent && fit[0] > 0.0f ){
             fac = 100.0f / fit[0] ;
             for( kk=0 ; kk < nts ; kk++ ) tsar[kk] *= fac ;
           }
         }
       }

       switch( norming ){ /* normalize */

         case NORM_RMS:
         default:{
#if 1
           ININFO_message(" RMS norming time series") ;
#endif
           for( jj=0 ; jj < vim[nvim]->nvec ; jj++ ){ /* set RMS = 1 */
             tsar = VECTIM_PTR( vim[nvim] , jj ) ;
             THD_normRMS( nts , tsar ) ;
           }
         }
         break ;

         case NORM_NONE: /*nada*/
         break ;

         case NORM_MAXABS:{ /* scale so max(abs(x)) = 1 */
           float mab,val ;
#if 1
           ININFO_message(" MAXABS norming time series") ;
#endif
           for( jj=0 ; jj < vim[nvim]->nvec ; jj++ ){ /* set max|x| = 1 */
             tsar = VECTIM_PTR( vim[nvim] , jj ) ;
             mab = fabsf(tsar[0]) ;
             for( kk=1 ; kk < nts ; kk++){
               val = fabsf(tsar[kk]) ; if( val > mab ) mab = val ;
             }
             if( mab > 0.0f ){           /* skip all-zero time series */
               val = 1.0f / mab ;
               for( kk=0 ; kk < nts ; kk++ ) tsar[kk] *= val ;
             }
           }
         }
         break ;

       }

       if( fwhm > 0.0f ){ /* spatially blur inside this level */
#if 1
         ININFO_message(" Blurring (in mask) volumes fwhm=%.3g",fwhm) ;
#endif
         mri_blur3D_vectim( vim[nvim] , fwhm ) ;
       }

       /* re-order spatially, as ordered */

       switch( ordering ){
         default: break ; /* == IJK */

         case ORDER_PV:{ /* == by coherence with 1st 2 principal vectors */
           MRI_IMAGE *tim , *pim ;
           Aint *kim ;
           float *tar , *par ;
#if 1
           ININFO_message(" Computing PV order for mask partition #%d - %d voxels", ii,cmval) ;
#endif
           /* copy data vectors into temporary image,
              along with originating index of each vector */
           tim = mri_new(nts,cmval,MRI_float) ; tar = MRI_FLOAT_PTR(tim) ;
           kim = (Aint *)malloc(sizeof(Aint)*cmval) ;
           for( jj=0 ; jj < cmval ; jj++ ){
             memcpy( tar+jj*nts, VECTIM_PTR(vim[nvim],jj), sizeof(float)*nts ) ;
             kim[jj] = jj ; /* source index */
           }
           /* make the PVmap */
#if 1
           ININFO_message(" Computing mri_vec_to_pvmap") ;
#endif
           pim = mri_vec_to_pvmap(tim) ; par = MRI_FLOAT_PTR(pim) ;
           /* sort so largest are first, keeping track of whence they came;
              negation turns the ascending sort into a descending one */
#if 1
           ININFO_message(" Sorting mri_vec_to_pvmap") ;
#endif
           for( jj=0 ; jj < cmval ; jj++ ) par[jj] = -par[jj] ;
#if Aintsize == 64
           qsort_floatint64_t( cmval , par , kim ) ;
#else
           qsort_floatint( cmval , par , kim ) ;
#endif
#if 1
           ININFO_message(" Putting sorted vectors back into vectim") ;
#endif

           /* copy from temp image back to vectim, in the right order */
           for( jj=0 ; jj < cmval ; jj++ ){
             memcpy( VECTIM_PTR(vim[nvim],jj), tar+kim[jj]*nts, sizeof(float)*nts ) ;
           }
           mri_free(tim) ; free(kim) ; mri_free(pim) ; /* toss the trash */
         }
         break ;

         case ORDER_PEEL:{ /* == by order of peeling from outside */
           short *depth=NULL ; Aint kk ; /* NOTE: this kk shadows the outer kk */
           /* per-voxel depth from mask boundary -- presumably;
              TODO(review): confirm against THD_mask_depth's contract */
           depth = THD_mask_depth( DSET_NX(dset),DSET_NY(dset),DSET_NZ(dset) ,
                                   tmask , 1 , NULL, 2 ) ;
           if( depth != NULL ){     /* if NULL, silently keep IJK order */
             Aint *idepth = (Aint *)calloc(sizeof(Aint),cmval) ;
             Aint *kim    = (Aint *)calloc(sizeof(Aint),cmval) ;
             MRI_IMAGE *tim = mri_new(nts,cmval,MRI_float) ;
             float *tar = MRI_FLOAT_PTR(tim) ;
#if 0
             ININFO_message(" Computing PEEL order for mask partition #%d - %d voxels",
                            ii,cmval) ;
#endif
             for( jj=0 ; jj < cmval ; jj++ ){
               memcpy( tar+jj*nts, VECTIM_PTR(vim[nvim],jj), sizeof(float)*nts ) ;
               kim[jj] = jj ;
             }
             /* compress depth[] (full volume) down to the masked voxels */
             for( jj=kk=0 ; jj < nxyz ; jj++ ){
               if( tmask[jj] ) idepth[kk++] = depth[jj] ;
             }
#if Aintsize == 64
             qsort_intint64_t( cmval , idepth , kim ) ;
#else
             qsort_intint( cmval , idepth , kim ) ;
#endif
             for( jj=0 ; jj < cmval ; jj++ ){
               memcpy( VECTIM_PTR(vim[nvim],jj), tar+kim[jj]*nts, sizeof(float)*nts ) ;
             }
             mri_free(tim) ; free(kim) ; free(idepth) ; free(depth) ;
           }
         }
         break ;

         case ORDER_LJ:{ /* == by Ljung-Box statistic [05 Feb 2020] */
           MRI_IMAGE *tim = mri_new(nts,cmval,MRI_float) , *pim ;
           Aint *kim = (Aint *)malloc(sizeof(Aint)*cmval) ;
           float *tar = MRI_FLOAT_PTR(tim), *par ;
#if 0
           ININFO_message(" Computing LJ order for mask partition #%d - %d voxels",
                          ii,cmval) ;
#endif
           /* copy data into temporary image */
           for( jj=0 ; jj < cmval ; jj++ ){
             memcpy( tar+jj*nts, VECTIM_PTR(vim[nvim],jj), sizeof(float)*nts ) ;
             kim[jj] = jj ; /* source index */
           }
           /* make the LJ map (one statistic per voxel) */
           pim = mri_vec_to_ljmap(tim) ; par = MRI_FLOAT_PTR(pim) ;
           /* sort so largest are first, keeping track of whence they came */
           for( jj=0 ; jj < cmval ; jj++ ) par[jj] = -par[jj] ;
#if Aintsize == 64
           qsort_floatint64_t( cmval , par , kim ) ;
#else
           qsort_floatint( cmval , par , kim ) ;
#endif
           /* copy from temp image back to vectim, in the right order */
           for( jj=0 ; jj < cmval ; jj++ ){
             memcpy( VECTIM_PTR(vim[nvim],jj), tar+kim[jj]*nts, sizeof(float)*nts ) ;
           }
           mri_free(tim) ; free(kim) ; mri_free(pim) ; /* toss the trash */
         }
         break ;

       }

       lev_siz[nvim] = cmval ;   /* remember voxel count of this level */
       nvim++ ;
     }
   }
   free(tmask) ;
   if( fit != NULL ) free(fit) ;
   DSET_unload(dset) ;

   lev_num = nvim ;

   if( nvim == 0 ) RETURN(NULL) ;   /* no level had enough voxels */

   /* glue multiple level vectims into 1, if needed */

   if( nvim == 1 ){
     vout = vim[0] ;
   } else {
     vout = THD_xyzcat_vectims( nvim , vim ) ;
     for( ii=0 ; ii < nvim ; ii++ ) VECTIM_destroy(vim[ii]) ;
   }

   free(vim) ;
   RETURN(vout) ;
}
342
343 /*--------------------------------------------------------------------------*/
344 /* resample a 1D array to a finer (nout > nin) or coarser (nout < nin) grid */
345 /*--------------------------------------------------------------------------*/
346
347 #undef P_M1 /* cubic interpolation polynomials */
348 #undef P_00
349 #undef P_P1
350 #undef P_P2
351 #undef P_FACTOR
352 #define P_M1(x) (-(x)*((x)-1.0f)*((x)-2.0f))
353 #define P_00(x) (3.0f*((x)+1.0f)*((x)-1.0f)*((x)-2.0f))
354 #define P_P1(x) (-3.0f*(x)*((x)+1.0f)*((x)-2.0f))
355 #define P_P2(x) ((x)*((x)+1.0f)*((x)-1.0f))
356 #define P_FACTOR 0.16666667f /* 1/6 = final scaling factor */
357
358 /* M3(x) = minimum sidelobe 3 term window (has no catchy name) */
359 #undef PIF
360 #define PIF 3.1415927f /* PI in float */
361 #undef M3
362 #define M3(x) (0.4243801f+0.4973406f*cosf(PIF*(x))+0.0782793f*cosf(PIF*(x)*2.0f))
363
resample_1D_float(int nin,float * xin,int nout,float * xout)364 static void resample_1D_float( int nin, float *xin, int nout, float *xout )
365 {
366 int ii , jj , nin1,nout1 , jbot,jtop ;
367 float ffac , fjmid , fj ;
368
369 ENTRY("resample_1D_float") ;
370
371 if( nin < 2 || xin == NULL || nout < 2 || xout == NULL ) EXRETURN ;
372
373 /* nothing to do? */
374
375 if( nin == nout ){
376 memcpy( xout , xin , sizeof(float)*nin) ;
377 EXRETURN ;
378 }
379
380 ffac = (nin-1.0f)/(nout-1.0f) ;
381 nout1 = nout-1 ;
382 nin1 = nin -1 ;
383
384 if( ffac < 1.0f ){ /*----- interpolate to finer grid (nout > nin) -----*/
385
386 xout[0] = xin[0] ;
387 xout[nout1] = xin[nin1] ;
388
389 if( use_old_resam ){ /* OLD = linear interp */
390
391 for( ii=1 ; ii < nout1 ; ii++ ){
392 fjmid = ii*ffac ;
393 jbot = (int)fjmid ; fj = (fjmid-jbot) ;
394 xout[ii] = (1.0f-fj)*xin[jbot] + fj*xin[jbot+1] ;
395 }
396
397 } else { /* NEW = cubic interp [Aug 2018] */
398
399 int jx , jx_m1,jx_00,jx_p1,jx_p2 ; float fx , wt_m1,wt_00,wt_p1,wt_p2 ;
400 for( ii=1 ; ii < nout1 ; ii++ ){
401 fjmid = ii*ffac ; jx = floorf(fjmid) ; fx = fjmid-jx ;
402 jx_m1 = jx-1 ; jx_00 = jx ; jx_p1 = jx+1 ; jx_p2 = jx+2 ;
403 if( jx_m1 < 0 ) jx_m1 = 0 ;
404 if( jx_m1 > nin1 ) jx_m1 = nin1 ;
405 if( jx_00 > nin1 ) jx_00 = nin1 ;
406 if( jx_p1 > nin1 ) jx_p1 = nin1 ;
407 if( jx_p2 > nin1 ) jx_p2 = nin1 ;
408 wt_m1 = P_M1(fx) ; wt_00 = P_00(fx) ; /* interpolation weights */
409 wt_p1 = P_P1(fx) ; wt_p2 = P_P2(fx) ;
410 xout[ii] = ( wt_m1*xin[jx_m1] + wt_00*xin[jx_00]
411 + wt_p1*xin[jx_p1] + wt_p2*xin[jx_p2] ) * P_FACTOR ;
412 }
413
414 }
415
416 } else { /*---------- coarser grid (nout < nin) ----------*/
417
418 if( use_old_resam ){ /* OLD = not so hot :( */
419
420 int npm = (int)rintf(ffac) ;
421 float *xq = (float *)malloc(sizeof(float)*nin) ;
422 float *wt = (float *)malloc(sizeof(float)*(npm+1)) ;
423 float sum, wsum , ww ;
424 memcpy( xq , xin , sizeof(float)*nin) ;
425 for( jj=0 ; jj <= npm ; jj++ ) wt[jj] = (npm-jj+0.5f) ;
426 for( ii=0 ; ii < nout ; ii++ ){
427 jbot = ii-npm ; if( jbot < 0 ) jbot = 0 ;
428 jtop = ii+npm ; if( jtop > nout1 ) jbot = nout1 ;
429 sum = wsum = 0.0f ;
430 for( jj=jbot ; jj <= jtop ; jj++ ){
431 ww = wt[abs(jj-ii)] ; wsum += ww ; sum += ww*xin[jj] ;
432 }
433 xq[ii] = sum / wsum ;
434 }
435 free(wt) ;
436 xout[0] = xq[0] ;
437 xout[nout-1] = xq[nin-1] ;
438 for( ii=1 ; ii < nout1 ; ii++ ){
439 fjmid = ii*ffac ;
440 jbot = (int)fjmid ; fj = (fjmid-jbot) ;
441 xout[ii] = (1.0f-fj)*xq[jbot] + fj*xq[jbot+1] ;
442 }
443 free(xq) ;
444
445 } else { /*---------- new coarsening method [Aug 2018] ----------*/
446
447 int npm = (int)rintf(0.555f*ffac+0.333f) , jmid ;
448 float sum, wsum , ww ;
449 static float *wt=NULL ; static int nwt=0 ;
450
451 if( npm < 1 ) npm = 1 ;
452
453 if( npm != nwt ){ /* recompute taper function, if needed */
454 nwt = npm ;
455 wt = (float *)realloc(wt,sizeof(float)*(nwt+2)) ;
456 #if 1
457 sum = (npm+0.111f) ;
458 wsum = 1.0f / sum ;
459 for( jj=0 ; jj <= npm ; jj++ ){
460 ww = (sum-jj)*wsum ; wt[jj] = M3(ww) ;
461 }
462 #else
463 for( jj=0 ; jj <= npm ; jj++ ) wt[jj] = (npm-jj+0.111f) ;
464 #endif
465 }
466
467 /* resample to coarser grid, averaging over neighbors */
468
469 for( ii=0 ; ii < nout ; ii++ ){
470 jmid = (int)rintf(ii*ffac) ; /* index mapped from xout back to xin */
471 jbot = jmid - npm ; if( jbot < 0 ) jbot = 0 ; /* averaging range */
472 jtop = jmid + npm ; if( jtop > nin1 ) jtop = nin1 ;
473 sum = wsum = 0.0f ;
474 for( jj=jbot ; jj <= jtop ; jj++ ){ /* do the local averaging */
475 ww = wt[abs(jj-jmid)] ; wsum += ww ; sum += ww*xin[jj] ;
476 }
477 xout[ii] = sum / wsum ;
478 }
479
480 }
481 }
482
483 EXRETURN ;
484 }
485
486 /*--------------------------------------------------------------------------*/
487 /* Convert a vectim to a 2D grayplot */
488 /*--------------------------------------------------------------------------*/
489
/* Convert the vectim (one time series per masked voxel, already prepped)
   into an nx-by-ny byte image: each voxel's time series becomes one row,
   resampled horizontally to nx columns; then the stack of rows is
   resampled vertically to ny rows.  Gray levels come either from the
   data's own [min,max] (or the user raw range), or from +/- grange about
   mid-gray when grayplot_set_range() was used.  Dashed separator lines
   are drawn between mmask levels using the file-static lev_num/lev_siz
   set by THD_dset_grayplot_prep().  Returns NULL on bad input.          */
static MRI_IMAGE * mri_vectim_to_grayplot( MRI_vectim *imts, int nx, int ny )
{
   int nxx=nx , nyy=ny , ntt,nss , ii,jj , domid=0 ;
   MRI_IMAGE *imout ; byte *outar ;
   MRI_IMAGE *imttt ; byte *tttar , *tar ;
   float zbot,ztop , val , zfac , *qar , *zar=NULL , *yar ;

   ENTRY("mri_vectim_to_grayplot") ;

   if( imts == NULL ) RETURN(NULL) ;

   /* clip requested image dimensions to a sane range */
   if( nxx < 512 ) nxx = 512 ; else if( nxx > 32768 ) nxx = 32768 ;
   if( nyy < 256 ) nyy = 256 ; else if( nyy > 32768 ) nyy = 32768 ;

   ntt = imts->nvals ;   /* time points  = raw row length   */
   nss = imts->nvec ;    /* voxel count  = raw column count */
   if( ntt < 19 || nss < 19 ) RETURN(NULL) ;

   /* find min and max of pre-processed data */

   if ( DO_RAW_RANGE ){  /* user-specified bounds override the data range */
     zbot = raw_range_bot;
     ztop = raw_range_top;
     INFO_message("Do raw range: [%f, %f]", zbot, ztop);
   }
   else{
     zbot = 6.66e+33 ; ztop = -zbot ;
     for( jj=0 ; jj < nss ; jj++ ){
       qar = VECTIM_PTR(imts,jj) ;
       for( ii=0 ; ii < ntt ; ii++ ){
              if( qar[ii] < zbot ) zbot = qar[ii] ;
         else if( qar[ii] > ztop ) ztop = qar[ii] ;
       }
     }
   }

   if( zbot >= ztop ) RETURN(NULL) ;   /* degenerate (constant) data */
   if( grange <= 0.0f ){
     zfac = 255.4f / (ztop-zbot) ; domid = 0 ;  /* map [zbot,ztop] -> 0..255 */
   } else {
     zfac = 127.7f / grange ;      domid = 1 ;  /* map +/-grange about mid-gray */
   }

   /* intermediate image: nxx columns, one row per voxel (nss rows) */
   imttt = mri_new( nxx,nss , MRI_byte ) ;
   tttar = MRI_BYTE_PTR(imttt) ;

   if( nxx != ntt ){
     zar = malloc(sizeof(float)*nxx) ;  /* workspace for row resampling */
   }

   /* process each time series from vectim (each row of output) */

   for( jj=0 ; jj < nss ; jj++ ){
     qar = VECTIM_PTR(imts,jj) ;
     if( zar != NULL ){ /* change length of this time series? */
       resample_1D_float( ntt,qar , nxx,zar ) ; yar = zar ;
     } else {
       yar = qar ;
     }
     tar = tttar + jj*nxx ;
     for( ii=0 ; ii < nxx ; ii++ ){ /* scale to 0..255 range */
       if( domid ){
         val = 127.7 + yar[ii]*zfac ;     /* centered about mid-gray */
       } else {
         val = (yar[ii]-zbot)*zfac ;
       }
       tar[ii] = BYTEIZE(val) ;
     }
   }
   if( zar != NULL ){ free(zar); zar = NULL; }

   if( nss == nyy ){ /* number of rows we have == number of rows we want? */
     RETURN(imttt) ;
   }

   /* convert number of rows we have (nss) to number we want (nyy) */

   imout = mri_new( nxx,nyy , MRI_byte ) ;
   outar = MRI_BYTE_PTR(imout) ;

   yar = (float *)malloc(sizeof(float)*nss) ;
   zar = (float *)malloc(sizeof(float)*nyy) ;

   for( ii=0 ; ii < nxx ; ii++ ){ /* resize each column */
     for( jj=0 ; jj < nss ; jj++ )
       yar[jj] = tttar[ii+jj*nxx] ;
     resample_1D_float( nss,yar , nyy,zar ) ;
     for( jj=0 ; jj < nyy ; jj++ )
       outar[ii+jj*nxx] = BYTEIZE(zar[jj]) ;
   }

   free(zar) ; free(yar) ; mri_free(imttt) ;

   /* draw dashed lines between the levels from mmask */

   if( lev_num > 1 ){
     float yfac ; int kk , rr ; byte qq=1 ;
     /* turn per-level sizes into cumulative offsets */
     for( kk=1 ; kk < lev_num ; kk++ ) lev_siz[kk] += lev_siz[kk-1] ;
     yfac = nyy / (float)nss ;   /* voxel row -> output row scaling */
     for( kk=0 ; kk < lev_num-1 ; kk++ ){
       jj = (int)rintf( yfac * lev_siz[kk] ) ;
       if( jj <= 0 ) jj = 1 ; else if( jj >= nyy-1 ) jj = nyy-2 ;
       for( ii=0 ; ii < nxx ; ii++ ){
         rr = ii%19 ;  /* 19-pixel dash cycle: 9 on, 10 off */
              if( rr <  9 ) outar[ii+jj*nxx] = outar[ii+(jj-1)*nxx] = qq ;
         else if( rr == 18 ) qq = 255u-qq ;  /* alternate dash gray 1 <-> 254 */
       }
     }
   }

   RETURN(imout) ;
}
601
602 /*--------------------------------------------------------------------------*/
603 /* Convert a 3D+time dataset to a grayplot, for fun and profit(?). */
604 /*--------------------------------------------------------------------------*/
605
THD_dset_to_grayplot(THD_3dim_dataset * dset,byte * mmask,int nxout,int nyout,int polort,float fwhm)606 MRI_IMAGE * THD_dset_to_grayplot( THD_3dim_dataset *dset ,
607 byte *mmask ,
608 int nxout , int nyout ,
609 int polort , float fwhm )
610 {
611 MRI_vectim *vim ; MRI_IMAGE *imout ;
612
613 ENTRY("THD_dset_to_grayplot") ;
614
615 vim = THD_dset_grayplot_prep( dset , mmask , polort , fwhm ) ;
616
617 if( nxout < 128 ) nxout = 1024 ;
618 if( nyout < 64 ) nyout = 512 ;
619
620 imout = mri_vectim_to_grayplot( vim, nxout , nyout ) ;
621
622 VECTIM_destroy(vim) ;
623
624 RETURN(imout) ;
625 }
626