/* $Id: njn_localmaxstatutil.cpp 462397 2015-03-18 16:23:53Z vasilche $
 * ===========================================================================
 *
 *                            PUBLIC DOMAIN NOTICE
 *               National Center for Biotechnology Information
 *
 *  This software/database is a "United States Government Work" under the
 *  terms of the United States Copyright Act.  It was written as part of
 *  the author's official duties as a United States Government employee and
 *  thus cannot be copyrighted.  This software/database is freely available
 *  to the public for use. The National Library of Medicine and the U.S.
 *  Government have not placed any restriction on its use or reproduction.
 *
 *  Although all reasonable efforts have been taken to ensure the accuracy
 *  and reliability of the software and data, the NLM and the U.S.
 *  Government do not and cannot warrant the performance or results that
 *  may be obtained by using this software or data. The NLM and the U.S.
 *  Government disclaim all warranties, express or implied, including
 *  warranties of performance, merchantability or fitness for any particular
 *  purpose.
 *
 *  Please cite the author in any work or product based on this material.
 *
 * ===========================================================================*/

/*****************************************************************************

File name: njn_localmaxstatutil.cpp

Author: John Spouge

Contents:

******************************************************************************/

#include <ncbi_pch.hpp>

#include "njn_localmaxstatutil.hpp"
#include "njn_approx.hpp"
#include "njn_dynprogproblim.hpp"
#include "njn_integer.hpp"
#include "njn_memutil.hpp"
#include "njn_root.hpp"

#include "sls_alp_data.hpp"

// Kludge: limit optimization by ICC 10.x to avoid undesired references to
// __svml_exp2 (problematic to use from DLLs on x86_64 or at all on ia32).
#if defined(NCBI_COMPILER_ICC)  &&  defined(__OPTIMIZE__) \
    &&  NCBI_COMPILER_VERSION >= 1000  &&  NCBI_COMPILER_VERSION < 1100
#  define NEED_ICC_OPTIMIZATION_LIMITS 1
#endif

USING_NCBI_SCOPE;
USING_SCOPE(blast);
USING_SCOPE(Njn);

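// flatten() collapses a packed 2-D score matrix and its joint probability table
// into a 1-D distribution over the distinct score values.
// Illustrative sketch (not from the original source): for a 2x2 match/mismatch
// matrix {{+1,-1},{-1,+1}} with uniform letter probabilities, prob_[i][j] = 0.25
// everywhere, so flatten() returns *dim_ = 2, *score_ = {-1,+1}, *p_ = {0.5,0.5}.
// The caller owns and must delete [] the arrays returned in *score_ and *p_.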
void LocalMaxStatUtil::flatten ( // allocates memory for linear probabilities and scores
size_t dimension_, // dimension of equilProb_
const Int4 *const *scoreMatrix_, // packed scoring matrix [0...dimension_)[0...dimension2_)
const double *const *prob_, // prob_ [0...dimension_)[0...dimension2_) : distribution of scores sum to 1.0
size_t *dim_, // dimension of p_
Int4 **score_, // score [0...dim_) in increasing order
double **p_, // linear p_ [0...dim_) : distribution of scores
size_t dimension2_) // dimension2 of equilProb_
{
    if (dimension2_ == 0) dimension2_ = dimension_;

    size_t i = 0;
    size_t j = 0;

    double sum = 0.0;

    for (i = 0; i < dimension_; i++)
    {
      for (j = 0; j < dimension2_; j++)
      {
         sum += prob_ [i][j];
      }
    }

#ifdef _DEBUG /* to eliminate compiler warning in release mode */
    const double FUDGE = 20.0;
#endif
    _ASSERT(Approx::relApprox (sum, 1.0, FUDGE * REL_TOL));

    Int4 s = 0;
    Int4 min = scoreMatrix_[0][0];
    Int4 max = scoreMatrix_[0][0];

    for (i = 0; i < dimension_; i++)
    {
      for (j = 0; j < dimension2_; j++)
      {
         if (scoreMatrix_ [i][j] < min)
             min = scoreMatrix_ [i][j];
         else if (max < scoreMatrix_ [i][j])
             max = scoreMatrix_ [i][j];
      }
    }

    assert (min <= max);

    size_t dim = static_cast <size_t> (max - min + 1);
    double *p = new double [dim];
    for (i = 0; i < dim; i++) p [i] = 0.0;

    for (i = 0; i < dimension_; i++)
    {
      for (j = 0; j < dimension2_; j++)
      {
         p [scoreMatrix_ [i][j] - min] += prob_ [i][j];
      }
    }

    *dim_ = 0;

    for (s = min; s <= max; s++)
    {
      if (0.0 < p [s - min]) ++*dim_;
    }

    *p_ = new double [*dim_];
    *score_ = new Int4 [*dim_];
    *dim_ = 0;

    for (s = min; s <= max; s++)
    {
      if (0.0 < p [s - min]) {
         (*score_) [*dim_] = s;
         (*p_) [*dim_] = p [s - min];
         ++*dim_;
      }
    }

    delete [] p; p = 0;
}

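// The matrix overload of lambda() below forms the joint distribution
// prob[i][j] = q_[i] * q_[j] for a pair of independent letters, flattens it with
// flatten(), and then delegates to the 1-D lambda() overload.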
double LocalMaxStatUtil::lambda (
size_t dimension_, // dimension of equilProb_
const Int4 *const *scoreMatrix_, // packed scoring matrix [0...dimension_)[0...dimension_)
const double *q_) // q_ [0...dimension_) : distribution of independent letters
{
    size_t i = 0;
    size_t j = 0;

    double **prob = MemUtil::newMatrix <double> (dimension_, dimension_);

    for (i = 0; i < dimension_; i++)
    {
        for (j = 0; j < dimension_; j++)
        {
            prob [i][j] = q_ [i] * q_ [j];
        }
    }

    size_t dim = 0;
    Int4 *score = 0;
    double *p = 0;

    flatten (dimension_, scoreMatrix_, prob, &dim, &score, &p);

    MemUtil::deleteMatrix <double> (prob, dimension_, dimension_); prob = 0;

    double lambdaHat = LocalMaxStatUtil::lambda (dim, score, p);

    delete [] p; p = 0;
    delete [] score; score = 0;

    return lambdaHat;
}


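   // File-scope working state shared by the n_* helpers below.  n_setParameters()
   // stores the caller's score/probability arrays (it does not copy them), and the
   // helpers n_totalProbAssoc(), n_meanPowerAssoc(), n_step() and n_bury() read this
   // state when they are passed as callbacks to the root finders and to DynProgProbLim.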
   size_t n_dimension = 0; // dimension of matrices
   const Int4 *n_score = 0; // score_ [0...dimension_ - 1]
   const double *n_prob = 0; // prob_ [0...dimension_ - 1]
   Int4 n_morgue = 0; // score_ [0] - 1
   Int4 n_entry = 0; // n_entry = 0 : weak descending ladder epoch ; n_entry = -1 : strict descending ladder epoch

   void n_setParameters (
   size_t dimension_, // #(distinct values) of scores & probabilities (which are paired)
   const Int4 *score_, // scores
   const double *prob_, // probabilities
   Int4 entry_ = 0) // entry_ = 0 : weak descending ladder epoch ; entry_ = -1 : strict descending ladder epoch
   {
      n_dimension = dimension_;
      n_score = score_;
      n_prob = prob_;
      n_morgue = score_ [0] - 1;
      n_entry = entry_;
   }

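   // n_totalProbAssoc(x) evaluates the moment generating function of the score
   // distribution, sum_i n_prob[i] * exp(x * n_score[i]).  For a distribution with
   // negative mean and at least one positive score it equals 1.0 at x = 0 and again
   // at a unique positive x, which is the lambda computed by lambda() below.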
#ifdef NEED_ICC_OPTIMIZATION_LIMITS
#  pragma optimization_level 1
#endif
   double n_totalProbAssoc (double x_)
   {
      double sum = 0.0;
      for (size_t i = 0; i < n_dimension; i++) {
         sum += n_prob [i] * exp (x_ * static_cast <double> (n_score [i]));
      }
      return sum;
   }

   double n_meanPowerAssoc (double x_, Int4 power_ = 1L)
   {
      double sum = 0.0;
      for (size_t i = 0; i < n_dimension; i++) {
         sum += Integer::integerPower (static_cast <double> (n_score [i]), power_) *
            n_prob [i] * exp (x_ * static_cast <double> (n_score [i]));
      }
      return sum;
   }

   double n_meanAssoc (double x_)
   {
      return n_meanPowerAssoc (x_);
   }

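   // n_bracket() produces an interval that should bracket the positive root of
   // n_totalProbAssoc(x) = 1.  The starting guess -log(p_max)/s_max makes the largest
   // single term of the sum equal to 1 (so the whole sum is >= 1 there); halving until
   // the sum drops below 1 leaves *p_ < lambda <= *q_ = 2 * *p_.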
   void n_bracket (double *p_, double *q_)
   {
      const double FACTOR = 0.5;
      *p_ = -log (n_prob [n_dimension - 1]) / static_cast <double> (n_score [n_dimension - 1]);
      while (1.0 <= n_totalProbAssoc (*p_)) {
         *p_ *= FACTOR;
      }
      *q_ = *p_ / FACTOR;
   }


double LocalMaxStatUtil::mu (
size_t dimension_, // #(distinct values) of scores & probabilities (which are paired)
const Int4 *score_, // scores in increasing order
const double *prob_) // corresponding probabilities
{
   double mu = 0.0;
   for (size_t i = 0; i < dimension_; i++) {
      mu += static_cast <double> (score_ [i]) * prob_ [i];
   }
   return mu;
}

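// lambda() solves sum_i prob_[i] * exp(lambda * score_[i]) = 1 for the unique
// positive root by bisection.  Illustrative check (not from the original source):
// for score = {-1, +1} with prob = {0.75, 0.25}, mu = -0.5 < 0 and the equation
// 0.75 * exp(-lambda) + 0.25 * exp(lambda) = 1 gives exp(lambda) = 3, i.e.
// lambda = log(3) ~ 1.0986.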
double LocalMaxStatUtil::lambda (
size_t dimension_, // #(distinct values)
const Int4 *score_, // values
const double *prob_) // probability of corresponding value
{
   n_setParameters (dimension_, score_, prob_);

   double p = 0.0;
   double q = 0.0;

   n_bracket (&p, &q);

   return Root::bisection (1.0, n_totalProbAssoc, p, q, REL_TOL * fabs (p - q));
}

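// muPowerAssoc() and muAssoc() are moments of the associated (exponentially tilted)
// distribution p*_i = prob_[i] * exp(lambda * score_[i]).  Because lambda satisfies
// sum_i prob_[i] * exp(lambda * score_[i]) = 1, the tilted weights already sum to 1,
// so no renormalization is needed.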
double LocalMaxStatUtil::muPowerAssoc (
size_t dimension_, // #(distinct values) of scores & probabilities (which are paired)
const Int4 *score_, // scores in increasing order
const double *prob_, // corresponding probabilities
double lambda_, // lambda
Int4 power_) // power
{
   n_setParameters (dimension_, score_, prob_);

   if (lambda_ == 0.0) lambda_ = lambda (dimension_, score_, prob_);

   return n_meanPowerAssoc (lambda_, power_);
}

double LocalMaxStatUtil::muAssoc (
size_t dimension_, // #(distinct values) of scores & probabilities (which are paired)
const Int4 *score_, // scores in increasing order
const double *prob_, // corresponding probabilities
double lambda_) // lambda
{
   return muPowerAssoc (dimension_, score_, prob_, lambda_);
}

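// thetaMin() locates the zero of n_meanAssoc (the derivative of the moment
// generating function) between 0 and lambda; because the mean score is negative,
// the MGF decreases from 1 at x = 0 to its minimum at thetaMin and rises back to 1
// at lambda.  rMin() below evaluates that minimum, so 0 < rMin < 1 in the
// logarithmic regime.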
double LocalMaxStatUtil::thetaMin (
size_t dimension_, // #(distinct values)
const Int4 *score_, // values
const double *prob_, // probability of corresponding value
double lambda_) // lambda
// assumes logarithmic regime
{
   n_setParameters (dimension_, score_, prob_);

   if (lambda_ == 0.0) lambda_ = lambda (dimension_, score_, prob_);

   double p = 0.0;
   double q = 0.0;
   n_bracket (&p, &q);
   return Root::bisection (0.0, n_meanAssoc, 0.0, lambda_, REL_TOL * fabs (p - q));
}

double LocalMaxStatUtil::rMin (
size_t dimension_, // #(distinct values)
const Int4 *score_, // values
const double *prob_, // probability of corresponding value
double lambda_, // lambda
double thetaMin_) // argument of rate
// assumes logarithmic regime
{
   n_setParameters (dimension_, score_, prob_);

   if (thetaMin_ == 0.0) thetaMin_ = thetaMin (dimension_, score_, prob_, lambda_);

   return n_totalProbAssoc (thetaMin_);
}

#ifdef NEED_ICC_OPTIMIZATION_LIMITS
#  pragma optimization_level 1
#endif
double LocalMaxStatUtil::r ( // r (theta)
size_t dimension_, // #(distinct values)
const Int4 *score_, // scores in increasing order
const double *prob_, // probability of corresponding value
double theta_) // argument of rate
// assumes logarithmic regime
{
   double sum = 0.0;
   for (size_t i = 0; i < dimension_; i++) {
      sum += prob_ [i] * exp (theta_ * static_cast <double> (score_ [i]));
   }
   return sum;
}

Int4 LocalMaxStatUtil::delta ( // delta (the gcd of the scores) for ungapped sequence comparison
size_t dimension_, // #(distinct values) of scores & probabilities (which are paired)
const Int4 *score_) // scores
{
   size_t i = 0;

   Int4 delta = 0;
   for (i = 0; i < dimension_; i++) {
      delta = Integer::euclidAlgorithm <Int4> (delta, score_ [i]);
   }
   return delta;
}

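// thetaMinusDelta() returns (1 - exp(-lambda * delta)) / delta, where delta is the
// gcd computed above.  Illustrative check (not from the original source): continuing
// the example at lambda(), score = {-1, +1} gives delta = 1 (assuming euclidAlgorithm
// returns the positive gcd), so thetaMinusDelta = 1 - exp(-log(3)) = 2/3.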
double LocalMaxStatUtil::thetaMinusDelta ( // theta [minus delta] for ungapped sequence comparison
double lambda_, // lambda, the exponential rate for the local maximum
size_t dimension_, // #(distinct values) of scores & probabilities (which are paired)
const Int4 *score_) // scores
{
   double del = static_cast <double> (delta (dimension_, score_));
   return (1.0 - exp (-lambda_ * del)) / del;
}


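   // Value-transition callbacks for DynProgProbLim.  n_step() advances the random
   // walk by one letter (adds n_score[state_]) unless the value has already been
   // absorbed into the morgue (any value below n_score[0]).  n_bury() sends every
   // value at or below n_entry into the morgue, absorbing the probability of ladder
   // epochs that have already ended.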
   Int4 n_step (Int4 oldValue_, size_t state_)
   {
      assert (state_ < n_dimension);
      return n_morgue < oldValue_ ? oldValue_ + n_score [state_] : oldValue_;
   }

   Int4 n_bury (Int4 oldValue_, size_t state_)
   {
      assert (state_ < n_dimension);
      return n_entry < oldValue_ ? oldValue_ : n_morgue;
   }


void LocalMaxStatUtil::descendingLadderEpochRepeat (
size_t dimension_, // #(distinct values)
const Int4 *score_, // values
const double *prob_, // probability of corresponding value
double *eSumAlpha_, // expectation (sum [alpha])
double *eOneMinusExpSumAlpha_, // expectation [1.0 - exp (sum [alpha])]
bool isStrict_, // ? is this a strict descending ladder epoch
double lambda_, // lambda for repeats : default is lambda0_ below
size_t endW_, // maximum w plus 1
double *pAlphaW_, // probability {alpha = w} : pAlphaW_ [0, wEnd)
double *eOneMinusExpSumAlphaW_, // expectation [1.0 - exp (sum [alpha]); alpha = w] : eOneMinusExpSumAlphaW_ [0, wEnd)
double lambda0_, // lambda for flattened distribution (avoid recomputation)
double mu0_, // mean of flattened distribution (avoid recomputation)
double muAssoc0_, // mean of associated flattened distribution (avoid recomputation)
double thetaMin0_, // thetaMin of flattened distribution (avoid recomputation)
double rMin0_, // rMin of flattened distribution (avoid recomputation)
double time_, // get time for the dynamic programming computation
bool *terminated_) // ? Was the dynamic programming computation terminated prematurely ?
// assumes logarithmic regime
{
    // Start dynamic programming probability calculation using notation in
    //
    // Mott R. and Tribe R. (1999)
    // J. Computational Biology 6(1):91-112
    //
    // Karlin S. and Taylor H.M.(1981)
    // A Second Course in Stochastic Processes, p. 480
    //
    // Note there is an error in Eq (6.19) there, which is corrected in Eq (6.20)
    //
    // This program uses departure into (-Inf, 0] not (-Inf, 0)

    // avoid recomputation
#ifdef _DEBUG /* to eliminate compiler warning in release mode */
    double mu0 = 0.0 == mu0_ ? mu (dimension_, score_, prob_) : mu0_;
#endif
    _ASSERT(mu0 < 0.0);
    double lambda0 = 0.0 == lambda0_ ? lambda (dimension_, score_, prob_) : lambda0_;
    _ASSERT (0.0 < lambda0);
    if (lambda_ == 0.0) lambda_ = lambda0;
    _ASSERT (0.0 < lambda_);
#ifdef _DEBUG /* to eliminate compiler warning in release mode */
    double muAssoc0 = 0.0 == muAssoc0_ ? muAssoc (dimension_, score_, prob_, lambda0) : muAssoc0_;
#endif
    _ASSERT (0.0 < muAssoc0);
    double thetaMin0 = 0.0 == thetaMin0_ ? thetaMin (dimension_, score_, prob_, lambda0) : thetaMin0_;
    _ASSERT (0.0 < thetaMin0);
    double rMin0 = 0.0 == rMin0_ ? rMin (dimension_, score_, prob_, lambda0, thetaMin0) : rMin0_;
    _ASSERT (0.0 < rMin0 && rMin0 < 1.0);

    const Int4 ITER_MIN = static_cast <Int4> ((log (REL_TOL * (1.0 - rMin0)) / log (rMin0)));
    _ASSERT (0 < ITER_MIN);
    const Int4 ITER = static_cast <Int4> (endW_) < ITER_MIN ? ITER_MIN : static_cast <Int4> (endW_);
    _ASSERT (0 < ITER);
    const Int4 Y_MAX = static_cast <Int4> (-log (REL_TOL) / lambda0);

    Int4 entry = isStrict_ ? -1 : 0;
    n_setParameters (dimension_, score_, prob_, entry);


    double time0 = 0.0;
    double time1 = 0.0;
    if (time_ > 0.0) Sls::alp_data::get_current_time (time0);

    DynProgProbLim dynProgProb (n_step, dimension_, prob_, score_ [0] - 1, Y_MAX);

    if (pAlphaW_) pAlphaW_ [0] = 0.0;
    if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_ [0] = 0.0;

    dynProgProb.update (); // iterate random walk

    Int4 value = 0;

    if (eSumAlpha_) *eSumAlpha_ = 0.0;
    if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ = 0.0;

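    // Main dynamic-programming loop: at step w the walk has taken w letters.  Values
    // at or below 'entry' mean the descending ladder epoch has ended at this step, so
    // their probability is added to pAlphaW_[w] and to the running expectations, then
    // moved into the morgue by n_bury before the walk is advanced another step by
    // n_step.  Iteration stops after ITER steps, chosen so that the probability still
    // unabsorbed is below roughly REL_TOL.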
    for (size_t w = 1; w < static_cast <size_t> (ITER); w++) {

        if (w < endW_) { // sum pAlphaW_ [w] and eOneMinusExpSumAlphaW_ [w]

             if (pAlphaW_) pAlphaW_ [w] = 0.0;
             if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_ [w] = 0.0;

             for (value = score_ [0]; value <= entry; value++) {
                if (pAlphaW_) pAlphaW_ [w] += dynProgProb.getProb (value);
                if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_ [w] +=
                                               dynProgProb.getProb (value) *
                                               (1.0 - exp (lambda_ * static_cast <double> (value)));
             }
        }

        for (value = score_ [0]; value <= entry; value++) {
           if (eSumAlpha_) *eSumAlpha_ += dynProgProb.getProb (value) * static_cast <double> (value);
           if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ += dynProgProb.getProb (value) *
                                          (1.0 - exp (lambda_ * static_cast <double> (value)));
        }

        dynProgProb.setValueFct (n_bury);
        dynProgProb.update (); // put probability into the morgue

        dynProgProb.setValueFct (n_step);
        dynProgProb.update (); // iterate random walk

        if (time_ > 0.0)
        {
            Sls::alp_data::get_current_time (time1);
            if (time1 - time0 > time_)
            {
                *terminated_ = true;
                return;
            }
        }

    }

    for (value = score_ [0]; value <= entry; value++) {
      if (eSumAlpha_) *eSumAlpha_ += dynProgProb.getProb (value) * static_cast <double> (value);
      if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ += dynProgProb.getProb (value) *
                                     (1.0 - exp (lambda_ * static_cast <double> (value)));
    }

    // check that not too much probability has been omitted
    double prob = 0.0;
    for (value = entry + 1; value < dynProgProb.getValueUpper (); value++) {
      prob += dynProgProb.getProb (value);
    }
    prob += dynProgProb.getProbLost ();

#ifdef _DEBUG /* to eliminate compiler warning in release mode */
    const double FUDGE = 2.0;
#endif
    _ASSERT (prob <= FUDGE * static_cast <double> (dimension_) * REL_TOL);
}

void LocalMaxStatUtil::descendingLadderEpoch (
size_t dimension_, // #(distinct values)
const Int4 *score_, // values
const double *prob_, // probability of corresponding value
double *eSumAlpha_, // expectation (sum [alpha])
double *eOneMinusExpSumAlpha_, // expectation [1.0 - exp (sum [alpha])]
bool isStrict_, // ? is this a strict descending ladder epoch
double lambda0_, // lambda for flattened distribution (avoid recomputation)
double mu0_, // mean of flattened distribution (avoid recomputation)
double muAssoc0_, // mean of associated flattened distribution (avoid recomputation)
double thetaMin0_, // thetaMin of flattened distribution (avoid recomputation)
double rMin0_, // rMin of flattened distribution (avoid recomputation)
double time_, // get time for the dynamic programming computation
bool *terminated_) // ? Was the dynamic programming computation terminated prematurely ?
{
   descendingLadderEpochRepeat (dimension_, score_, prob_,
      eSumAlpha_, eOneMinusExpSumAlpha_, isStrict_, 0.0, 0, 0, 0,
      lambda0_, mu0_, muAssoc0_, thetaMin0_, rMin0_, time_, terminated_);
}

bool LocalMaxStatUtil::isProbDist (
size_t dimension_, // #(distinct values) of scores & probabilities (which are paired)
const double *prob_) // corresponding probabilities
{
   double sum = 0.0;
   for (size_t i = 0; i < dimension_; i++) {
      if (prob_ [i] < 0.0 || 1.0 < prob_ [i]) return false;
      sum += prob_ [i];
   }
   return Approx::relApprox (sum, 1.0, REL_TOL);
}

bool LocalMaxStatUtil::isScoreIncreasing (
size_t dimension_, // #(distinct values)
const Int4 *score_) // scores in increasing order
{
   for (size_t i = 1; i < dimension_; i++) {
      if (score_ [i] <= score_ [i - 1]) return false;
   }
   return true;
}

bool LocalMaxStatUtil::isLogarithmic (
size_t dimension_, // #(distinct values) of scores & probabilities (which are paired)
const Int4 *score_, // scores in increasing order
const double *prob_) // corresponding probabilities
{
   _ASSERT (score_);
   _ASSERT (prob_);
   if (! isScoreIncreasing (dimension_, score_)) return false;
   if (! isProbDist (dimension_, prob_)) return false;
   if (0.0 <= mu (dimension_, score_, prob_)) return false;
   if (score_ [dimension_ - 1] <= 0) return false;
   return true;
}
