/*
  Teem: Tools to process and visualize scientific data and images             .
  Copyright (C) 2012, 2011, 2010, 2009  University of Chicago
  Copyright (C) 2008, 2007, 2006, 2005  Gordon Kindlmann
  Copyright (C) 2004, 2003, 2002, 2001, 2000, 1999, 1998  University of Utah

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public License
  (LGPL) as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.
  The terms of redistributing and/or modifying this software also
  include exceptions to the LGPL that facilitate static linking.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with this library; if not, write to Free Software Foundation, Inc.,
  51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
*/

#include "gage.h"
#include "privateGage.h"

/*
** sets the filter sample location (fsl) array based on the fractional
** probe location ctx->point.frac
**
** One possible rare surprise: if a filter is not continuous with 0
** at the end of its support, and if the sample location is at the
** highest possible point (xi == N-2, xf = 1.0), then the filter
** weights may not be the desired ones.  Forward differencing (via
** nrrdKernelForwDiff) is a good example of this.
*/
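/*
** Worked example (editor's illustration; the numbers are just assumed
** sample values): with ctx->radius == 2 and point.frac == (0.3, 0.0, 0.75),
** the case 2 branch below fills
**   fslx = { 1.3,  0.3,  -0.7,  -1.7 }
**   fsly = { 1.0,  0.0,  -1.0,  -2.0 }
**   fslz = { 1.75, 0.75, -0.25, -1.25 }
** i.e. the index-space offsets from the probe location to the 2*radius
** samples surrounding it along each axis, which are then passed to the
** kernels' evalN_d() in _gageFwSet().
*/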
void
_gageFslSet(gageContext *ctx) {
  int fr, i;
  double *fslx, *fsly, *fslz;
  double xf, yf, zf;

  fr = ctx->radius;
  fslx = ctx->fsl + 0*2*fr;
  fsly = ctx->fsl + 1*2*fr;
  fslz = ctx->fsl + 2*2*fr;
  xf = ctx->point.frac[0];
  yf = ctx->point.frac[1];
  zf = ctx->point.frac[2];
  switch (fr) {
  case 1:
    fslx[0] = xf; fslx[1] = xf-1;
    fsly[0] = yf; fsly[1] = yf-1;
    fslz[0] = zf; fslz[1] = zf-1;
    break;
  case 2:
    fslx[0] = xf+1; fslx[1] = xf; fslx[2] = xf-1; fslx[3] = xf-2;
    fsly[0] = yf+1; fsly[1] = yf; fsly[2] = yf-1; fsly[3] = yf-2;
    fslz[0] = zf+1; fslz[1] = zf; fslz[2] = zf-1; fslz[3] = zf-2;
    break;
  default:
    /* filter radius bigger than 2 */
    for (i=-fr+1; i<=fr; i++) {
      fslx[i+fr-1] = xf-i;
      fsly[i+fr-1] = yf-i;
      fslz[i+fr-1] = zf-i;
    }
    break;
  }
  return;
}

/*
** renormalize weights of a reconstruction kernel with
** constraint: the sum of the weights must equal the continuous
** integral of the kernel
*/
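/*
** For example (editor's note, numbers assumed): if the kernel's
** continuous integral() is 1.0 but the 2*radius weights along one axis
** happen to sum to 0.97, each weight on that axis is multiplied by
** 1.0/0.97 below, so that reconstructing a constant-valued volume
** still returns exactly that constant.
*/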
void
_gageFwValueRenormalize(gageContext *ctx, int wch) {
  double integral, sumX, sumY, sumZ, *fwX, *fwY, *fwZ;
  int i, fd;

  fd = 2*ctx->radius;
  fwX = ctx->fw + 0 + fd*(0 + 3*wch);
  fwY = ctx->fw + 0 + fd*(1 + 3*wch);
  fwZ = ctx->fw + 0 + fd*(2 + 3*wch);
  integral = ctx->ksp[wch]->kernel->integral(ctx->ksp[wch]->parm);
  sumX = sumY = sumZ = 0;
  for (i=0; i<fd; i++) {
    sumX += fwX[i];
    sumY += fwY[i];
    sumZ += fwZ[i];
  }
  for (i=0; i<fd; i++) {
    fwX[i] *= integral/sumX;
    fwY[i] *= integral/sumY;
    fwZ[i] *= integral/sumZ;
  }
  return;
}

/*
** renormalize weights of a derivative kernel with
** constraint: the sum of the weights must be zero, but
** sign of individual weights must be preserved
*/
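/*
** For example (editor's note, numbers assumed): if along one axis the
** positive weights sum to posX = 1.2 and the negative weights sum (in
** magnitude) to negX = 1.0, then fixX = sqrt(1.2/1.0) ~ 1.095; scaling
** the negative weights up by fixX and the positive weights down by fixX
** makes both sums equal sqrt(posX*negX) ~ 1.095, so the total is zero
** and no weight changes sign.
*/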
void
_gageFwDerivRenormalize(gageContext *ctx, int wch) {
  char me[]="_gageFwDerivRenormalize";
  double negX, negY, negZ, posX, posY, posZ, fixX, fixY, fixZ,
    *fwX, *fwY, *fwZ;
  int i, fd;

  fd = 2*ctx->radius;
  fwX = ctx->fw + 0 + fd*(0 + 3*wch);
  fwY = ctx->fw + 0 + fd*(1 + 3*wch);
  fwZ = ctx->fw + 0 + fd*(2 + 3*wch);
  negX = negY = negZ = 0;
  posX = posY = posZ = 0;
  for (i=0; i<fd; i++) {
    if (fwX[i] <= 0) { negX += -fwX[i]; } else { posX += fwX[i]; }
    if (fwY[i] <= 0) { negY += -fwY[i]; } else { posY += fwY[i]; }
    if (fwZ[i] <= 0) { negZ += -fwZ[i]; } else { posZ += fwZ[i]; }
  }
  /* fix is the sqrt() of factor by which the positive values
     are too big.  negative values are scaled up by fix;
     positive values are scaled down by fix */
  fixX = sqrt(posX/negX);
  fixY = sqrt(posY/negY);
  fixZ = sqrt(posZ/negZ);
  if (ctx->verbose > 2) {
    fprintf(stderr, "%s: fixX = % 10.4f, fixY = % 10.4f, fixZ = % 10.4f\n",
            me, (float)fixX, (float)fixY, (float)fixZ);
  }
  for (i=0; i<fd; i++) {
    if (fwX[i] <= 0) { fwX[i] *= fixX; } else { fwX[i] /= fixX; }
    if (fwY[i] <= 0) { fwY[i] *= fixY; } else { fwY[i] /= fixY; }
    if (fwZ[i] <= 0) { fwZ[i] *= fixZ; } else { fwZ[i] /= fixZ; }
  }
  return;
}

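/*
** _gageFwSet: evaluates the filter weights ctx->fw at the sample
** locations ctx->fsl for every needed kernel, then applies the
** renormalizations requested by ctx->parm.  (Editor's note on the fw
** layout, based on the offsets used below: for kernel kidx, the
** fd = 2*radius weights for the X, Y, and Z axes are stored
** contiguously starting at ctx->fw + fd*3*kidx (X block, then Y,
** then Z), which is why one evalN_d() call over fd*3 locations
** handles all three axes at once.  (sidx, sfrac) locate the probe
** within the stack, and matter only when stack-based derivative
** normalization is on.)
*/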
void
_gageFwSet(gageContext *ctx, unsigned int sidx, double sfrac) {
  char me[]="_gageFwSet";
  int kidx;
  unsigned int fd;

  fd = 2*ctx->radius;
  for (kidx=gageKernelUnknown+1; kidx<gageKernelLast; kidx++) {
    if (!ctx->needK[kidx] || kidx==gageKernelStack) {
      continue;
    }
    /* we evaluate weights for all three axes with one call */
    ctx->ksp[kidx]->kernel->evalN_d(ctx->fw + fd*3*kidx, ctx->fsl,
                                    fd*3, ctx->ksp[kidx]->parm);
  }

  if (ctx->verbose > 2) {
    fprintf(stderr, "%s: filter weights after kernel evaluation:\n", me);
    _gagePrint_fslw(stderr, ctx);
  }
  if (ctx->parm.renormalize) {
    for (kidx=gageKernelUnknown+1; kidx<gageKernelLast; kidx++) {
      if (!ctx->needK[kidx] || kidx==gageKernelStack) {
        continue;
      }
      switch (kidx) {
      case gageKernel00:
      case gageKernel10:
      case gageKernel20:
        _gageFwValueRenormalize(ctx, kidx);
        break;
      default:
        _gageFwDerivRenormalize(ctx, kidx);
        break;
      }
    }
    if (ctx->verbose > 2) {
      fprintf(stderr, "%s: filter weights after renormalization:\n", me);
      _gagePrint_fslw(stderr, ctx);
    }
  }

  if (ctx->parm.stackUse && ctx->parm.stackNormalizeDeriv) {
    unsigned int jj;
    double scl, norm, *fwX, *fwY, *fwZ;

    scl = AIR_AFFINE(0.0, sfrac, 1.0,
                     ctx->stackPos[sidx],
                     ctx->stackPos[sidx+1]);
#if 0
    double (*dgeval)(double x, const double *parm),
      dgparm[2] = {0, 3};
    dgeval = nrrdKernelDiscreteGaussian->eval1_d;
    dgparm[0] = scl;
    /* from Eq. (120) in T. Lindeberg. "Feature Detection with Automatic
       Scale Selection." International Journal of Computer Vision,
       1998, 30, 77-116 */
    /* 0.7978845608 ~= sqrt(2)/sqrt(pi) */
    norm = 0.7978845608/(dgeval(0.0, dgparm) + dgeval(1.0, dgparm));
#endif
    /* really simple; no Lindeberg normalization, possible bias */
    norm = scl + ctx->parm.stackNormalizeDerivBias;
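    /* For example (editor's note, values assumed): with stackPos[sidx]
       == 2.0, stackPos[sidx+1] == 3.0, and sfrac == 0.25, scl comes out
       as 2.25; with a zero stackNormalizeDerivBias, norm == 2.25, so the
       first-derivative (gageKernel11) weights below get scaled by 2.25
       and the second-derivative (gageKernel22) weights by 2.25*2.25. */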

    fd = 2*ctx->radius;
    kidx = gageKernel11;
    fwX = ctx->fw + 0 + fd*(0 + 3*kidx);
    fwY = ctx->fw + 0 + fd*(1 + 3*kidx);
    fwZ = ctx->fw + 0 + fd*(2 + 3*kidx);
    for (jj=0; jj<fd; jj++) {
      fwX[jj] *= norm;
      fwY[jj] *= norm;
      fwZ[jj] *= norm;
    }
    kidx = gageKernel22;
    fwX = ctx->fw + 0 + fd*(0 + 3*kidx);
    fwY = ctx->fw + 0 + fd*(1 + 3*kidx);
    fwZ = ctx->fw + 0 + fd*(2 + 3*kidx);
    for (jj=0; jj<fd; jj++) {
      fwX[jj] *= norm*norm;
      fwY[jj] *= norm*norm;
      fwZ[jj] *= norm*norm;
    }
  }

  return;
}

/*
** _gageLocationSet
**
** updates the probe location in the general context, and the things
** which depend on it: fsl, fw
**
** (xif,yif,zif) is the *index*-space position in the volume;
** sif is the index-space position in the stack, and is ignored
** if there is no stack behavior
**
** does NOT use biff, but returns 1 on error and 0 if all okay.
** The errors are probing outside the volume or stack, and (with stack
** use) degenerate stack reconstruction weights; these set ctx->errNum
** and sprintf a message into ctx->errStr.
*/
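/*
** Minimal usage sketch (editor's addition; the caller shown is
** hypothetical, only _gageLocationSet() itself comes from this file):
**
**   if (_gageLocationSet(ctx, xi, yi, zi, si)) {
**     return 1;   (out of bounds: ctx->errNum says why, and ctx->errStr
**                  has a message if ctx->parm.generateErrStr)
**   }
**   ... ctx->point, ctx->fsl, ctx->fw (and ctx->stackFsl/stackFw when
**   ctx->parm.stackUse) are now consistent with (xi, yi, zi, si) ...
*/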
int
_gageLocationSet(gageContext *ctx,
                 double xif, double yif, double zif, double sif) {
  char me[]="_gageLocationSet";
  unsigned int top[3],  /* "top" x, y, z: highest valid index in volume */
    idx[4];             /* computed integral positions in volume */
  int sdiff;
  double frac[4], min, max[3];

  /* **** bounds checking **** */
  top[0] = ctx->shape->size[0] - 1;
  top[1] = ctx->shape->size[1] - 1;
  top[2] = ctx->shape->size[2] - 1;
  if (nrrdCenterNode == ctx->shape->center) {
    min = 0;
    max[0] = top[0];
    max[1] = top[1];
    max[2] = top[2];
  } else {
    min = -0.5;
    max[0] = AIR_CAST(double, top[0]) + 0.5;
    max[1] = AIR_CAST(double, top[1]) + 0.5;
    max[2] = AIR_CAST(double, top[2]) + 0.5;
  }
  if (!( AIR_IN_CL(min, xif, max[0]) &&
         AIR_IN_CL(min, yif, max[1]) &&
         AIR_IN_CL(min, zif, max[2]) )) {
    if (ctx->parm.generateErrStr) {
      sprintf(ctx->errStr, "%s: position (%g,%g,%g) outside (%s-centered) "
              "bounds [%g,%g]x[%g,%g]x[%g,%g]",
              me, xif, yif, zif,
              airEnumStr(nrrdCenter, ctx->shape->center),
              min, max[0], min, max[1], min, max[2]);
    } else {
      strcpy(ctx->errStr, _GAGE_NON_ERR_STR);
    }
    ctx->errNum = gageErrBoundsSpace;
    return 1;
  }
  if (ctx->parm.stackUse) {
    if (!( AIR_IN_CL(0, sif, ctx->pvlNum-2) )) {
      if (ctx->parm.generateErrStr) {
        sprintf(ctx->errStr, "%s: stack position %g outside (%s-centered) "
                "bounds [0,%u]", me, sif,
                airEnumStr(nrrdCenter, nrrdCenterNode), ctx->pvlNum-2);
      } else {
        strcpy(ctx->errStr, _GAGE_NON_ERR_STR);
      }
      ctx->errNum = gageErrBoundsStack;
      return 1;
    }
  }

  /* **** computing integral and fractional sample locations **** */
  /* Thu Jan 14 19:46:53 CST 2010: detected that along the low edge
     (next to sample 0) in cell centered, the rounding behavior of
     AIR_CAST(unsigned int, xif), namely [-0.5,0] --> 0, meant that
     the low edge was not treated symmetrically with the high edge.
     This motivated the change from using idx to store the lower
     corner of the containing voxel, to the upper corner.  So, the new
     "idx" is always +1 of what the old idx was.  Code here and in
     ctx.c (since idx is saved into ctx->point.idx) has been changed
     accordingly */
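  /* For example (editor's note, positions assumed): xif == 2.4 gives
     idx[0] = AIR_CAST(unsigned int, 3.4) = 3, the upper corner of the
     containing voxel [2,3], and (below) frac[0] = 2.4 - (3-1) = 0.4.
     Near the low edge in cell-centering, xif == -0.3 gives
     idx[0] = AIR_CAST(unsigned int, 0.7) = 0 and frac[0] = -0.3 - (0-1)
     = 0.7, so the low edge is handled symmetrically with the high edge. */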
  ELL_3V_SET(idx,
             AIR_CAST(unsigned int, xif+1), /* +1: see above */
             AIR_CAST(unsigned int, yif+1),
             AIR_CAST(unsigned int, zif+1));
  if (ctx->verbose > 5) {
    fprintf(stderr, "%s: (%g,%g,%g,%g) -%s-> mm [%g, %g/%g/%g]\n"
            "        --> idx %u %u %u\n",
            me, xif, yif, zif, sif,
            airEnumStr(nrrdCenter, ctx->shape->center),
            min, max[0], max[1], max[2], idx[0], idx[1], idx[2]);
  }
  /* these can only kick in for node-centered, because that's when
     max[] has an integral value */
  idx[0] -= (idx[0]-1 == max[0]);
  idx[1] -= (idx[1]-1 == max[1]);
  idx[2] -= (idx[2]-1 == max[2]);
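  /* For example (editor's note, sizes assumed): node-centered with
     size[0] == 10 gives max[0] == 9; probing exactly at xif == 9.0 gave
     idx[0] == 10 above, which the first line here pulls back to 9, so
     that frac[0] becomes 1.0 relative to lower corner 8 == N-2 (the
     situation noted in the comment before _gageFslSet). */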
  if (ctx->verbose > 5) {
    fprintf(stderr, "%s:        ----> idx %u %u %u\n",
            me, idx[0], idx[1], idx[2]);
  }
  ELL_3V_SET(frac,
             xif - (AIR_CAST(float, idx[0])-1),
             yif - (AIR_CAST(float, idx[1])-1),
             zif - (AIR_CAST(float, idx[2])-1));
  ELL_3V_COPY(ctx->point.idx, idx);  /* not idx[3], yet */
  if (ctx->parm.stackUse) {
    idx[3] = AIR_CAST(unsigned int, sif);
    idx[3] -= (idx[3] == ctx->pvlNum-2);
    frac[3] = sif - idx[3];
    sdiff = (ctx->point.idx[3] + ctx->point.frac[3] != sif);
  } else {
    idx[3] = 0;
    frac[3] = 0;
    sdiff = AIR_FALSE;
  }
  if (ctx->verbose > 2) {
    fprintf(stderr, "%s: \n"
            "        pos (% 15.7f,% 15.7f,% 15.7f,% 15.7f) \n"
            "        -> i(%5d,%5d,%5d,%5d) \n"
            "         + f(% 15.7f,% 15.7f,% 15.7f,% 15.7f) \n",
            me, xif, yif, zif, sif, idx[0], idx[1], idx[2], idx[3],
            frac[0], frac[1], frac[2], frac[3]);
  }

  /* **** compute *spatial* fsl and fw ****
     these have to be reconsidered if anything changes about the
     fractional spatial position, or (if no fractional spatial change),
     movement along scale AND using normalization based on scale */
  if ( ctx->point.frac[0] != frac[0]
       || ctx->point.frac[1] != frac[1]
       || ctx->point.frac[2] != frac[2]
       || (ctx->parm.stackUse && sdiff && ctx->parm.stackNormalizeDeriv)) {
    /* We don't yet record the scale position in ctx->point because
       that's done below while setting stackFsl and stackFw. So, have
       to pass stack pos info to _gageFwSet() */
    ELL_3V_COPY(ctx->point.frac, frac);
    /* these may take some time (especially if using renormalization),
       hence the conditional above */
    _gageFslSet(ctx);
    _gageFwSet(ctx, idx[3], frac[3]);
  }

  /* **** compute *stack* fsl and fw ****  */
  if (ctx->verbose > 2 && ctx->parm.stackUse) {
    fprintf(stderr, "%s: point.frac[3] %f + idx[3] %u = %f %s sif %f\n", me,
            ctx->point.frac[3], ctx->point.idx[3],
            ctx->point.frac[3] + ctx->point.idx[3],
            (sdiff ? "*NOT ==*" : "=="), sif);
  }
  if (!ctx->parm.stackUse) {
    ctx->point.idx[3] = idx[3];
    ctx->point.frac[3] = frac[3];
    ctx->point.stackFwNonZeroNum = 0;
  } else if (sdiff) {
    double sum;
    unsigned int ii, nnz;
    NrrdKernelSpec *sksp;

    /* node-centered sampling of stack indices from 0 to ctx->pvlNum-2 */
    /* HEY: we really are quite far from implementing arbitrary
       nrrdBoundary behaviors here, and centering is stuck on node! */
    /* HEY: honestly, the whole idea that it still makes sense to do
       low-level operations in index space, when the world-space locations
       of the samples can be non-uniform, is a little suspect.  This is
       all legit for nrrdKernelTent and nrrdKernelHermiteScaleSpaceFlag,
       but is pretty fishy otherwise */
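    /* For example (editor's note, values assumed): with ctx->pvlNum == 5
       (four stack samples, indices 0..3) and sif == 1.3, the loop below
       sets stackFsl = {1.3, 0.3, -0.7, -1.7}; with nrrdKernelTent as the
       stack kernel, evalN_d() then gives stackFw = {0, 0.7, 0.3, 0},
       i.e. linear interpolation between stack samples 1 and 2. */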
    for (ii=0; ii<ctx->pvlNum-1; ii++) {
      ctx->stackFsl[ii] = sif - ii;
      if (ctx->verbose > 2) {
        fprintf(stderr, "%s: ctx->stackFsl[%u] = %g\n",
                me, ii, ctx->stackFsl[ii]);
      }
    }
    sksp = ctx->ksp[gageKernelStack];
    sksp->kernel->evalN_d(ctx->stackFw, ctx->stackFsl,
                          ctx->pvlNum-1, sksp->parm);
    if (ctx->verbose > 2) {
      for (ii=0; ii<ctx->pvlNum-1; ii++) {
        fprintf(stderr, "%s: ctx->stackFw[%u] = %g\n",
                me, ii, ctx->stackFw[ii]);
      }
    }
    /* compute stackFwNonZeroNum whether or not parm.stackNormalizeRecon! */
    nnz = 0;
    if (ctx->parm.stackNormalizeRecon) {
      sum = 0;
      for (ii=0; ii<ctx->pvlNum-1; ii++) {
        nnz += !!ctx->stackFw[ii];
        sum += ctx->stackFw[ii];
      }
      if (!sum) {
        if (ctx->parm.generateErrStr) {
          sprintf(ctx->errStr, "%s: integral of stackFw[] is zero; "
                  "can't do stack reconstruction", me);
        } else {
          strcpy(ctx->errStr, _GAGE_NON_ERR_STR);
        }
        ctx->errNum = gageErrStackIntegral;
        return 1;
      }
      for (ii=0; ii<ctx->pvlNum-1; ii++) {
        ctx->stackFw[ii] /= sum;
      }
      if (ctx->verbose > 2) {
        for (ii=0; ii<ctx->pvlNum-1; ii++) {
          fprintf(stderr, "%s: ctx->stackFw[%u] = %g\n", me,
                  ii, ctx->stackFw[ii]);
        }
      }
    } else {
      for (ii=0; ii<ctx->pvlNum-1; ii++) {
        nnz += !!ctx->stackFw[ii];
      }
      if (!nnz) {
        if (ctx->parm.generateErrStr) {
          sprintf(ctx->errStr, "%s: all stackFw[] weights are zero; "
                  "can't do stack reconstruction", me);
        } else {
          strcpy(ctx->errStr, _GAGE_NON_ERR_STR);
        }
        ctx->errNum = gageErrStackIntegral;
        return 1;
      }
    }

    ctx->point.idx[3] = idx[3];
    ctx->point.frac[3] = frac[3];
    ctx->point.stackFwNonZeroNum = nnz;
  }

  return 0;
}