1 // -*- C++ -*-
2 /**
3 * @brief Abstract top-level class for all distributions
4 *
5 * Copyright 2005-2021 Airbus-EDF-IMACS-ONERA-Phimeca
6 *
7 * This library is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published by
9 * the Free Software Foundation, either version 3 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library. If not, see <http://www.gnu.org/licenses/>.
19 *
20 */
21 #include <cmath>
22 #include <cstdlib>
23 #include <algorithm>
24
25 #include "openturns/PersistentObjectFactory.hxx"
26 #include "openturns/DistributionImplementation.hxx"
27 #include "openturns/Distribution.hxx"
28 #include "openturns/Exception.hxx"
29 #include "openturns/Log.hxx"
30 #include "openturns/IdentityMatrix.hxx"
31 #include "openturns/Collection.hxx"
32 #include "openturns/RandomGenerator.hxx"
33 #include "openturns/CompositeDistribution.hxx"
34 #include "openturns/Chi.hxx"
35 #include "openturns/ChiSquare.hxx"
36 #include "openturns/ComposedDistribution.hxx"
37 #include "openturns/Dirac.hxx"
38 #include "openturns/LogNormal.hxx"
39 #include "openturns/LogUniform.hxx"
40 #include "openturns/Mixture.hxx"
41 #include "openturns/Normal.hxx"
42 #include "openturns/RandomMixture.hxx"
43 #include "openturns/MaximumDistribution.hxx"
44 #include "openturns/ProductDistribution.hxx"
45 #include "openturns/SquaredNormal.hxx"
46 #include "openturns/TruncatedDistribution.hxx"
47 #include "openturns/Uniform.hxx"
48 #include "openturns/IndependentCopula.hxx"
49 #include "openturns/MarginalDistribution.hxx"
50 #include "openturns/MarginalTransformationEvaluation.hxx"
51 #include "openturns/MarginalTransformationGradient.hxx"
52 #include "openturns/MarginalTransformationHessian.hxx"
53 #include "openturns/RosenblattEvaluation.hxx"
54 #include "openturns/InverseRosenblattEvaluation.hxx"
55 #include "openturns/Function.hxx"
56 #include "openturns/SklarCopula.hxx"
57 #include "openturns/SpecFunc.hxx"
58 #include "openturns/PlatformInfo.hxx"
59 #include "openturns/Cloud.hxx"
60 #include "openturns/Contour.hxx"
61 #include "openturns/Curve.hxx"
62 #include "openturns/Polygon.hxx"
63 #include "openturns/Staircase.hxx"
64 #include "openturns/Drawable.hxx"
65 #include "openturns/Graph.hxx"
66 #include "openturns/Brent.hxx"
67 #include "openturns/Box.hxx"
68 #include "openturns/Tuples.hxx"
69 #include "openturns/Combinations.hxx"
70 #include "openturns/TBB.hxx"
71 #include "openturns/GaussKronrod.hxx"
72 #include "openturns/GaussLegendre.hxx"
73 #include "openturns/IteratedQuadrature.hxx"
74 #include "openturns/OptimizationProblem.hxx"
75 #include "openturns/TNC.hxx"
76 #include "openturns/TriangularMatrix.hxx"
77 #include "openturns/MethodBoundEvaluation.hxx"
78 #include "openturns/SobolSequence.hxx"
79 #include "openturns/SymbolicFunction.hxx"
80
81 BEGIN_NAMESPACE_OPENTURNS
82
83 CLASSNAMEINIT(DistributionImplementation)
84
85 typedef Collection<Distribution> DistributionCollection;
86
87 static const Factory<DistributionImplementation> Factory_DistributionImplementation;
88
89 /* Default constructor */
90 DistributionImplementation::DistributionImplementation()
91 : PersistentObject()
92 , mean_(Point(1, 0.0))
93 , covariance_(CovarianceMatrix(1))
94 , gaussNodes_()
95 , gaussWeights_()
96 , integrationNodesNumber_(ResourceMap::GetAsUnsignedInteger("Distribution-DefaultIntegrationNodesNumber"))
97 , isAlreadyComputedMean_(false)
98 , isAlreadyComputedCovariance_(false)
99 , isAlreadyComputedGaussNodesAndWeights_(false)
100 , pdfEpsilon_(ResourceMap::GetAsScalar("Distribution-DefaultPDFEpsilon"))
101 , cdfEpsilon_(ResourceMap::GetAsScalar("Distribution-DefaultCDFEpsilon"))
102 , quantileEpsilon_(ResourceMap::GetAsScalar("Distribution-DefaultQuantileEpsilon"))
103 , quantileIterations_(ResourceMap::GetAsUnsignedInteger("Distribution-DefaultQuantileIteration"))
104 , isAlreadyComputedStandardDistribution_(false)
105 , p_standardDistribution_()
106 , isAlreadyCreatedGeneratingFunction_(false)
107 , generatingFunction_(0)
108 , dimension_(1)
109 , weight_(1.0)
110 // The range is empty by default
111 , range_(Interval(1.0, -1.0))
112 , description_(1)
113 , isParallel_(ResourceMap::GetAsBool("Distribution-Parallel"))
114 , isCopula_(false)
115 {
116 description_[0] = "X0";
117 }
118
119 /* Virtual constructor */
120 DistributionImplementation * DistributionImplementation::clone() const
121 {
122 return new DistributionImplementation(*this);
123 }
124
125 /* Comparison operator */
126 Bool DistributionImplementation::operator ==(const DistributionImplementation & other) const
127 {
128 if (this == &other) return true;
129 // Compare both this to other and other to this to ensure symmetry
130 return equals(other) && other.equals(*this);
131 }
132
133 Bool DistributionImplementation::equals(const DistributionImplementation & ) const
134 {
135 throw NotYetImplementedException(HERE) << "In DistributionImplementation::equals";
136 }
137
138 /* Comparison operator */
139 Bool DistributionImplementation::operator !=(const DistributionImplementation & other) const
140 {
141 return !operator==(other);
142 }
143
144 /* Addition operator */
145 Distribution DistributionImplementation::operator + (const Distribution & other) const
146 {
147 return operator + (*(other.getImplementation()));
148 }
149
150 Distribution DistributionImplementation::operator + (const DistributionImplementation & other) const
151 {
152 if (dimension_ != other.dimension_)
153 throw InvalidDimensionException(HERE) << "Can only sum distributions with the same dimension:" << dimension_ << " vs " << other.dimension_;
154
155 if (dimension_ == 1)
156 {
157 if (getClassName() == "Dirac") return other + getRealization()[0];
158 Collection< Distribution > coll(2);
159 coll[0] = *this;
160 coll[1] = other.clone();
161 RandomMixture res(coll);
162 // Check if a simplification has occurred
163 if ((res.getDistributionCollection().getSize() == 1) && (res.getWeights()(0, 0) == 1.0) && (res.getConstant()[0] == 0.0))
164 return res.getDistributionCollection()[0];
165 // No simplification
166 return res.clone();
167 }
168
169 if (!hasIndependentCopula() || !other.hasIndependentCopula())
170 throw NotYetImplementedException(HERE) << "Can only sum distributions with independent copulas";
171
172 Collection< Distribution > marginals(dimension_);
173 for (UnsignedInteger j = 0; j < dimension_; ++ j)
174 marginals[j] = getMarginal(j) + other.getMarginal(j);
175 return new ComposedDistribution(marginals);
176 }
177
178 Distribution DistributionImplementation::operator + (const Scalar value) const
179 {
180 if (value == 0.0) return clone();
181
182 if (dimension_ == 1)
183 {
184 if (getClassName() == "Dirac") return new Dirac(getRealization()[0] + value);
185 Collection< Distribution > coll(2);
186 coll[0] = *this;
187 coll[1] = Dirac(Point(1, value));
188 RandomMixture res(coll);
189 // Check if a simplification has occurred
190 if ((res.getDistributionCollection().getSize() == 1) && (res.getWeights()(0, 0) == 1.0) && (res.getConstant()[0] == 0.0))
191 return res.getDistributionCollection()[0];
192 // No simplification
193 return res.clone();
194 }
195
196 Collection< Distribution > marginals(dimension_);
197 for (UnsignedInteger j = 0; j < dimension_; ++ j)
198 marginals[j] = getMarginal(j) + value;
199 return new ComposedDistribution(marginals, getCopula());
200 }
201
202 /* Subtraction operator */
203 Distribution DistributionImplementation::operator - (const Distribution & other) const
204 {
205 return operator - (*(other.getImplementation()));
206 }
207
208 Distribution DistributionImplementation::operator - (const DistributionImplementation & other) const
209 {
210 if (dimension_ != other.dimension_)
211 throw InvalidDimensionException(HERE) << "Can only subtract distributions with the same dimension:" << dimension_ << " vs " << other.dimension_;
212
213 Point weights(2);
214 weights[0] = 1.0;
215 weights[1] = -1.0;
216
217 if (dimension_ == 1)
218 {
219 if (other.getClassName() == "Dirac") return *this + (-other.getRealization()[0]);
220 Collection< Distribution > coll(2);
221 coll[0] = *this;
222 coll[1] = other.clone();
223 RandomMixture res(coll, weights);
224 // Check if a simplification has occurred
225 if ((res.getDistributionCollection().getSize() == 1) && (res.getWeights()(0, 0) == 1.0) && (res.getConstant()[0] == 0.0))
226 return res.getDistributionCollection()[0];
227 // No simplification
228 return res.clone();
229 }
230
231 if (!hasIndependentCopula() || !other.hasIndependentCopula())
232 throw NotYetImplementedException(HERE) << "Can only subtract distributions with independent copulas";
233
234 Collection< Distribution > marginals(dimension_);
235 for (UnsignedInteger j = 0; j < dimension_; ++ j)
236 marginals[j] = getMarginal(j) - other.getMarginal(j);
237 return new ComposedDistribution(marginals);
238 }
239
240 Distribution DistributionImplementation::operator - (const Scalar value) const
241 {
242 if (value == 0.0) return clone();
243 return *this + (-value);
244 }
245
246 /* Multiplication operator */
247 Distribution DistributionImplementation::operator * (const Distribution & other) const
248 {
249 return operator * (*(other.getImplementation()));
250 }
251
252 Distribution DistributionImplementation::operator * (const DistributionImplementation & other) const
253 {
254 // Special case: Dirac distribution
255 if ((getDimension() == 1) && (other.getDimension() == 1))
256 {
257 if (getClassName() == "Dirac")
258 return other * getRealization()[0];
259 if (other.getClassName() == "Dirac")
260 return *this * other.getRealization()[0];
261 }
262 // Special case: LogNormal distributions
263 if ((getClassName() == "LogNormal") && (other.getClassName() == "LogNormal"))
264 {
265 const Point parameters(getParameter());
266 const Point otherParameters(other.getParameter());
267 return new LogNormal(parameters[0] + otherParameters[0], std::sqrt(parameters[1] * parameters[1] + otherParameters[1] * otherParameters[1]));
268 }
269 if ((getClassName() == "LogUniform") && (other.getClassName() == "LogUniform"))
270 {
271 const Point parameters(getParameter());
272 const Point otherParameters(other.getParameter());
273 return (Uniform(parameters[0], parameters[1]) + Uniform(otherParameters[0], otherParameters[1])).exp();
274 }
275 if ((getClassName() == "LogUniform") && (other.getClassName() == "LogNormal"))
276 {
277 const Point parameters(getParameter());
278 const Point otherParameters(other.getParameter());
279 return (Uniform(parameters[0], parameters[1]) + Normal(otherParameters[0], otherParameters[1])).exp();
280 }
281 if ((getClassName() == "LogNormal") && (other.getClassName() == "LogUniform"))
282 {
283 const Point parameters(getParameter());
284 const Point otherParameters(other.getParameter());
285 return (Normal(parameters[0], parameters[1]) + Uniform(otherParameters[0], otherParameters[1])).exp();
286 }
287 return new ProductDistribution(*this, other.clone());
288 }
289
290 Distribution DistributionImplementation::operator * (const Scalar value) const
291 {
292 if (dimension_ != 1) throw NotYetImplementedException(HERE) << "In DistributionImplementation::operator * (const Scalar value) const: can only multiply 1D distributions by a constant.";
293 if (value == 0.0) return new Dirac(Point(1, 0.0));
294 if (value == 1.0) return clone();
295 if (getClassName() == "Dirac") return new Dirac(getRealization()[0] * value);
296 const Collection< Distribution > coll(1, *this);
297 const Point weight(1, value);
298 RandomMixture res(coll, weight);
299 // If the weight has been integrated into the unique atom and there is no constant
300 if ((res.getWeights()(0, 0) == 1.0) && (res.getConstant()[0] == 0.0))
301 return res.getDistributionCollection()[0];
302 return res.clone();
303 }
304
305 /* Division operator */
306 Distribution DistributionImplementation::operator / (const Distribution & other) const
307 {
308 return operator / (*(other.getImplementation()));
309 }
310
311 Distribution DistributionImplementation::operator / (const DistributionImplementation & other) const
312 {
313 if ((dimension_ != 1) || (other.dimension_ != 1)) throw NotYetImplementedException(HERE) << "In DistributionImplementation::operator / (const DistributionImplementation & other) const: can only divide 1D distributions.";
314 if (other.getClassName() == "Dirac")
315 {
316 const Scalar otherValue = other.getRealization()[0];
317 if (otherValue == 0.0) throw InvalidArgumentException(HERE) << "Error: cannot divide by a Dirac distribution located in 0";
318 return *this * (1.0 / otherValue);
319 }
320 return operator * (other.inverse());
321 }
322
323 Distribution DistributionImplementation::operator / (const Scalar value) const
324 {
325 if (dimension_ != 1) throw NotYetImplementedException(HERE) << "In DistributionImplementation::operator / (const Scalar value) const: can only divide 1D distributions by a constant.";
326 if (value == 0.0) throw InvalidArgumentException(HERE) << "Error: cannot divide by 0.";
327 if (value == 1.0) return clone();
328 return (*this) * (1.0 / value);
329 }
330
331 /* Product operator */
332 Distribution operator * (const Scalar scalar,
333 const DistributionImplementation & distribution)
334 {
335 return distribution * scalar;
336 }
337
338 Distribution operator * (const Scalar scalar,
339 const DistributionImplementation::Implementation & p_distribution)
340 {
341 return (*p_distribution) * scalar;
342 }
343
344 /* Division operator */
345 Distribution operator / (const Scalar scalar,
346 const DistributionImplementation & distribution)
347 {
348 return (distribution.inverse()) * scalar;
349 }
350
351 Distribution operator / (const Scalar scalar,
352 const DistributionImplementation::Implementation & p_distribution)
353 {
354 return (*p_distribution).inverse() * scalar;
355 }
356
357 /* Addition operator */
358 Distribution operator + (const Scalar scalar,
359 const DistributionImplementation & distribution)
360 {
361 return distribution + scalar;
362 }
363
364 Distribution operator + (const Scalar scalar,
365 const DistributionImplementation::Implementation & p_distribution)
366 {
367 return (*p_distribution) + scalar;
368 }
369
370 /* Subtraction operator */
371 Distribution operator - (const Scalar scalar,
372 const DistributionImplementation & distribution)
373 {
374 return (distribution * (-1.0)) + scalar;
375 }
376
377 Distribution operator - (const Scalar scalar,
378 const DistributionImplementation::Implementation & p_distribution)
379 {
380 return ((*p_distribution) * (-1.0)) + scalar;
381 }
382
383 Distribution operator - (const DistributionImplementation & distribution)
384 {
385 return distribution * (-1.0);
386 }
387
388 Distribution operator - (const DistributionImplementation::Implementation & p_distribution)
389 {
390 return (*p_distribution) * (-1.0);
391 }
392
393
394 Distribution maximum(const DistributionImplementation::Implementation & p_left,
395 const DistributionImplementation::Implementation & p_right)
396 {
397 MaximumDistribution::DistributionCollection coll(2);
398 coll[0] = p_left;
399 coll[1] = p_right;
400 return new MaximumDistribution(coll);
401 }
402
403 Distribution maximum(const DistributionImplementation & left,
404 const DistributionImplementation::Implementation & p_right)
405 {
406 MaximumDistribution::DistributionCollection coll(2);
407 coll[0] = left;
408 coll[1] = p_right;
409 return new MaximumDistribution(coll);
410 }
411
412 Distribution maximum(const DistributionImplementation::Implementation & p_left,
413 const DistributionImplementation & right)
414 {
415 MaximumDistribution::DistributionCollection coll(2);
416 coll[0] = p_left;
417 coll[1] = right;
418 return new MaximumDistribution(coll);
419 }
420
421 Distribution maximum(const DistributionImplementation & left,
422 const DistributionImplementation & right)
423 {
424 MaximumDistribution::DistributionCollection coll(2);
425 coll[0] = left;
426 coll[1] = right;
427 return new MaximumDistribution(coll);
428 }
429
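/* Illustrative usage sketch (not part of the library): the arithmetic operators
   and free functions above build RandomMixture, ProductDistribution or
   MaximumDistribution objects behind the Distribution interface, with
   simplifications for Dirac, LogNormal and LogUniform atoms. Assuming the
   usual OpenTURNS headers are available:

     const Normal n(0.0, 1.0);
     const Uniform u(0.0, 1.0);
     const Distribution shifted(n + 3.0);        // RandomMixture equivalent to Normal(3, 1)
     const Distribution scaled(2.0 * u);         // free operator*(Scalar, DistributionImplementation)
     const Distribution product(n * u);          // ProductDistribution
     const Distribution largest(maximum(n, u));  // MaximumDistribution of the two atoms
*/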
430
431 /* String converter */
432 String DistributionImplementation::__repr__() const
433 {
434 OSS oss(true);
435 oss << "class=" << DistributionImplementation::GetClassName()
436 << " description=" << description_;
437 return oss;
438 }
439
440 /* String converter */
441 String DistributionImplementation::__str__(const String & ) const
442 {
443 return __repr__();
444 }
445
446
447 /* Weight accessor */
448 void DistributionImplementation::setWeight(const Scalar w)
449 {
450 weight_ = w;
451 }
452
453 /* Weight accessor */
454 Scalar DistributionImplementation::getWeight() const
455 {
456 return weight_;
457 }
458
459
460 /* Dimension accessor */
461 UnsignedInteger DistributionImplementation::getDimension() const
462 {
463 return dimension_;
464 }
465
466 /* Get the roughness, i.e. the L2-norm of the PDF */
467 Scalar DistributionImplementation::getRoughness() const
468 {
469 const Interval interval(getRange());
470
471 // Use adaptive multidimensional integration of the PDF on the reduced interval
472 const PDFSquaredWrapper pdfSquaredWrapper(this);
473 Scalar roughness = 0.0;
474 if (dimension_ == 1)
475 {
476 Scalar error = -1.0;
477 const Point singularities(getSingularities());
478 // If no singularity inside of the given reduced interval
479 const UnsignedInteger singularitiesNumber = singularities.getSize();
480 const Scalar lower = interval.getLowerBound()[0];
481 const Scalar upper = interval.getUpperBound()[0];
482 if (singularitiesNumber == 0 || singularities[0] >= upper || singularities[singularitiesNumber - 1] <= lower)
483 roughness = GaussKronrod().integrate(pdfSquaredWrapper, interval, error)[0];
484 else
485 {
486 Scalar a = lower;
487 for (UnsignedInteger i = 0; i < singularitiesNumber; ++i)
488 {
489 const Scalar b = singularities[i];
490 if (b > lower && b < upper)
491 {
492 roughness += GaussKronrod().integrate(pdfSquaredWrapper, Interval(a, b), error)[0];
493 a = b;
494 }
495 // Exit the loop if no more singularities inside of the reduced interval
496 if (b >= upper)
497 break;
498 } // for
499 // Last contribution
500 roughness += GaussKronrod().integrate(pdfSquaredWrapper, Interval(a, upper), error)[0];
501 } // else
502 return std::max(0.0, roughness);
503 } // dimension_ == 1
504
505 // Dimension > 1
506 if (hasIndependentCopula())
507 {
508 roughness = 1.0;
509 for (UnsignedInteger i = 0; i < dimension_; ++i)
510 roughness *= getMarginal(i).getRoughness();
511 }
512 else
513 {
514 // Small dimension
515 if (dimension_ <= ResourceMap::GetAsUnsignedInteger("Distribution-SmallDimensionRoughness"))
516 {
517 roughness = IteratedQuadrature().integrate(pdfSquaredWrapper, interval)[0];
518 } // Small dimension
519 else
520 {
521 const UnsignedInteger size = ResourceMap::GetAsUnsignedInteger("Distribution-RoughnessSamplingSize");
522 const String samplingMethod(ResourceMap::GetAsString("Distribution-RoughnessSamplingMethod"));
523 Sample sample;
524 if (samplingMethod == "MonteCarlo")
525 sample = getSample(size);
526 else if (samplingMethod == "QuasiMonteCarlo")
527 sample = getSampleByQMC(size);
528 else
529 {
530 LOGWARN(OSS() << "Unknown sampling method=" << samplingMethod << " to compute roughness. Resort to MonteCarlo");
531 sample = getSample(size);
532 }
533 roughness = computePDF(sample).computeMean()[0];
534 }
535
536 }
537 // Roughness is a L2-norm, so must be positive
538 return std::max(0.0, roughness);
539 }
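/* Worked reference values (for illustration, not used by the code): the roughness is
   the squared L2-norm \int p(x)^2 dx, so it equals 1 / (b - a) for Uniform(a, b)
   and 1 / (2 * sqrt(pi)) ~ 0.2821 for the standard Normal. */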
540
541 /* Dimension accessor */
542 void DistributionImplementation::setDimension(const UnsignedInteger dim)
543 {
544 if (dim == 0) throw InvalidArgumentException(HERE) << "Dimension argument must be an integer >= 1, here dim = " << dim;
545 if (dim != dimension_)
546 {
547 dimension_ = dim;
548 isAlreadyComputedMean_ = false;
549 isAlreadyComputedCovariance_ = false;
550 isAlreadyComputedGaussNodesAndWeights_ = false;
551 // Check if the current description is compatible with the new dimension
552 if (description_.getSize() != dim) description_ = Description::BuildDefault(dim, "X");
553 }
554 }
555
556 /* Get one realization of the distribution */
557 Point DistributionImplementation::getRealization() const
558 {
559 return getRealizationByInversion();
560 }
561
562 /* Get a numerical sample whose elements follow the distribution */
563 Sample DistributionImplementation::getSample(const UnsignedInteger size) const
564 {
565 SampleImplementation returnSample(size, dimension_);
566 UnsignedInteger shift = 0;
567 for (UnsignedInteger i = 0; i < size; ++ i)
568 {
569 const Point point(getRealization());
570 std::copy(point.begin(), point.end(), returnSample.data_begin() + shift);
571 shift += dimension_;
572 }
573 returnSample.setName(getName());
574 returnSample.setDescription(getDescription());
575 return returnSample;
576 }
577
578 /* Get one realization of the distribution */
579 Point DistributionImplementation::getRealizationByInversion() const
580 {
581 return getSampleByInversion(1)[0];
582 }
583
584 /* Get a numerical sample whose elements follow the distribution */
585 Sample DistributionImplementation::getSampleByInversion(const UnsignedInteger size) const
586 {
587 SampleImplementation returnSample(size, dimension_);
588 UnsignedInteger shift = 0;
589 for (UnsignedInteger i = 0; i < size; ++ i)
590 {
591 const Point point(computeSequentialConditionalQuantile(RandomGenerator::Generate(dimension_)));
592 std::copy(point.begin(), point.end(), returnSample.data_begin() + shift);
593 shift += dimension_;
594 }
595 returnSample.setName(getName());
596 returnSample.setDescription(getDescription());
597 return returnSample;
598 }
599
600 Sample DistributionImplementation::getSampleByQMC(const UnsignedInteger size) const
601 {
602 static SobolSequence sequence(dimension_);
603 SampleImplementation returnSample(size, dimension_);
604 UnsignedInteger shift = 0;
605 for (UnsignedInteger i = 0; i < size; ++ i)
606 {
607 const Point point(computeSequentialConditionalQuantile(sequence.generate()));
608 std::copy(point.begin(), point.end(), returnSample.data_begin() + shift);
609 shift += dimension_;
610 }
611 returnSample.setName(getName());
612 returnSample.setDescription(getDescription());
613 return returnSample;
614 }
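/* Illustrative usage sketch, assuming a 1D standard Normal as the concrete
   distribution: getSample() relies on the distribution-specific generator,
   getSampleByInversion() applies the sequential conditional quantile transform
   to pseudo-random uniforms, and getSampleByQMC() applies the same transform
   to a Sobol' low-discrepancy sequence:

     const Normal d;
     const Sample mc(d.getSample(1000));        // pseudo-random sample
     const Sample qmc(d.getSampleByQMC(1000));  // quasi-random sample, lower discrepancy
*/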
615
616 /* Get the DDF of the distribution */
617 Point DistributionImplementation::computeDDF(const Point & point) const
618 {
619 if (isContinuous())
620 {
621 const UnsignedInteger dimension = getDimension();
622 Point ddf(dimension);
623 const Scalar h = std::pow(pdfEpsilon_, 1.0 / 3.0);
624 LOGINFO(OSS() << "h=" << h);
625 for (UnsignedInteger i = 0; i < dimension; ++i)
626 {
627 Point left(point);
628 left[i] += h;
629 Point right(point);
630 right[i] -= h;
631 const Scalar denom = left[i] - right[i];
632 const Scalar pdfLeft = computePDF(left);
633 const Scalar pdfRight = computePDF(right);
634 ddf[i] = (pdfLeft - pdfRight) / denom;
635 LOGINFO(OSS() << "left=" << left << ", right=" << right << ", pdfLeft=" << pdfLeft << ", pdfRight=" << pdfRight);
636 }
637 return ddf;
638 }
639 if (dimension_ == 1)
640 {
641 Point ddf(dimension_);
642 const Scalar cdfPoint = computeCDF(point);
643 const Scalar h = std::pow(cdfEpsilon_, 0.25);
644 const Scalar idenom = 1.0 / std::sqrt(cdfEpsilon_);
645 for (UnsignedInteger i = 0; i < dimension_; ++ i)
646 {
647 Point epsilon(dimension_, 0.0);
648 epsilon[i] = h;
649 ddf[i] = (computeCDF(point + epsilon) - 2.0 * cdfPoint + computeCDF(point - epsilon)) * idenom;
650 }
651 return ddf;
652 }
653 throw NotDefinedException(HERE) << "In DistributionImplementation::computeDDF()";
654 }
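/* The generic fallback above uses finite differences. For a continuous
   distribution the i-th component is the centered difference of the PDF,
     ddf_i(x) ~ [p(x + h e_i) - p(x - h e_i)] / (2 h),  with h = pdfEpsilon_^(1/3),
   while in the 1D non-continuous branch a second-order difference of the CDF is
   used, ddf(x) ~ [F(x + h) - 2 F(x) + F(x - h)] / h^2 with h = cdfEpsilon_^(1/4). */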
655
656 /* Get the PDF of the distribution */
657 Scalar DistributionImplementation::computePDF(const Point & point) const
658 {
659 const Scalar epsilon = 2.0 * std::pow(cdfEpsilon_, 1.0 / 3.0);
660 const Sample xSample(((Box(Indices(dimension_, 0)).generate() - Point(dimension_, 0.5)) * Point(dimension_, epsilon)) + point);
661 const Sample cdfSample(computeCDF(xSample));
662 Scalar pdf = 0.0;
663 const UnsignedInteger iMax = cdfSample.getSize();
664 for (UnsignedInteger i = 0; i < iMax; ++ i)
665 {
666 // The points with an even number of positive shifts are counted positively
667 // The others are counted negatively
668 const UnsignedInteger numNullBits = dimension_ - SpecFunc::BitCount(i);
669 pdf += (1.0 - 2.0 * (numNullBits % 2)) * cdfSample(i, 0);
670 }
671 return pdf / std::pow(epsilon, 1.0 * dimension_);
672 }
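/* The generic fallback above approximates the PDF by an alternating-sign
   difference of the CDF over the 2^d corners of a box of edge epsilon centered
   at the point. For instance, in dimension 1,
     p(x) ~ [F(x + eps/2) - F(x - eps/2)] / eps,
   and in dimension 2,
     p(x) ~ [F(x1+, x2+) - F(x1+, x2-) - F(x1-, x2+) + F(x1-, x2-)] / eps^2,
   where xk+- stands for xk +- eps/2. */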
673
674 Scalar DistributionImplementation::computeLogPDF(const Point & point) const
675 {
676 const Scalar pdf = computePDF(point);
677 Scalar logPdf = SpecFunc::LowestScalar;
678 if ( pdf > 0.0 ) logPdf = std::log(pdf);
679 return logPdf;
680 }
681
682 /* Get the CDF, complementary CDF and survival function of the distribution */
683 /* On a Point */
684 Scalar DistributionImplementation::computeCDF(const Point & ) const
685 {
686 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeCDF(const Point & point) const";
687 }
688
689 Scalar DistributionImplementation::computeComplementaryCDF(const Point & point) const
690 {
691 const Scalar cdf = computeCDF(point);
692 return 0.5 + (0.5 - cdf);
693 }
694
695 /* Computation of the survival function:
696 A_k = \{X_k > x_k\}, k=0..n-1
697 P(\cap_k A_k) = 1 - P(\cup_k \bar{A}_k)
698 = 1 + \sum_{j=1}^{n}(-1)^j\sum_{\{k_1,\dots,k_j\}\subset\{0,\dots,n-1\}}P(\bar{A}_{k_1},\dots,\bar{A}_{k_j})
699 so
700 n=1:
701 P(X_1>x_1) = 1 - P(X_1\leq x_1)
702
703 n=2:
704 P(X_1>x_1, X_2>x_2) = 1 - P(X_1\leq x_1) - P(X_2\leq x_2) + P(X_1\leq x_1, X_2\leq x_2)
705
706 n=3:
707 P(X_1>x_1, X_2>x_2, X_3>x_3) = 1 - P(X_1\leq x_1) - P(X_2\leq x_2) - P(X_3\leq x_3) + P(X_1\leq x_1, X_2\leq x_2) + P(X_1\leq x_1, X_3\leq x_3) + P(X_2\leq x_2, X_3\leq x_3) - P(X_1\leq x_1, X_2\leq x_2, X_3\leq x_3)
708 */
709 Scalar DistributionImplementation::computeSurvivalFunction(const Point & point) const
710 {
711 if (dimension_ == 1) return computeComplementaryCDF(point);
712 // Special case for independent copula
713 if (hasIndependentCopula())
714 {
715 Scalar value = 1.0;
716 for (UnsignedInteger i = 0; i < dimension_; ++i) value *= getMarginal(i).computeComplementaryCDF(point[i]);
717 return value;
718 }
719 // For elliptical distributions,
720 // P(X_1-mu_1<=x_1,...,X_d-mu_d<=x_d)=P(X_1-mu_1>=-x_1,...,X_d-mu_d>=-x_d)
721 // So
722 // P(X_1>=x_1,...,X_d>=x_d)=P(X_1<=2mu_1-x_1,...,X_d<=2mu_d-x_d)
723 if (isElliptical()) return computeCDF(getMean() * 2.0 - point);
724 const Point lowerBounds(getRange().getLowerBound());
725 const Point upperBounds(getRange().getUpperBound());
726 Bool allOutside = true;
727 for (UnsignedInteger i = 0; i < dimension_; ++ i)
728 {
729 if (point[i] >= upperBounds[i]) return 0.0;
730 allOutside &= (point[i] <= lowerBounds[i]);
731 }
732 if (allOutside) return 1.0;
733
734 // Use Poincaré's formula
735 const Scalar cdf = computeCDF(point);
736 Scalar value = 1.0 + (dimension_ % 2 == 0 ? cdf : -cdf);
737 Scalar sign = -1.0;
738 for (UnsignedInteger i = 1; i < dimension_; ++ i)
739 {
740 Scalar contribution = 0.0;
741 IndicesCollection indices(Combinations(i, dimension_).generate());
742 Point subPoint(i);
743 for (UnsignedInteger j = 0; j < indices.getSize(); ++j)
744 {
745 const Indices marginalJ(indices.cbegin_at(j), indices.cend_at(j));
746 for (UnsignedInteger k = 0; k < i; ++k) subPoint[k] = point[marginalJ[k]];
747 contribution += getMarginal(marginalJ).computeCDF(subPoint);
748 }
749 value += sign * contribution;
750 sign = -sign;
751 }
752 // Due to roundoff, the value can be slightly outside of [0,1]
753 return std::min(1.0, std::max(0.0, value));
754 }
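/* Illustrative check in dimension 2 with an independent copula (a sketch, not
   part of the library):
     S(x1, x2) = P(X1 > x1, X2 > x2) = (1 - F1(x1)) * (1 - F2(x2))
               = 1 - F1(x1) - F2(x2) + F(x1, x2),
   which is exactly the n = 2 instance of the Poincare formula used above. For
   two independent standard Normals,
     ComposedDistribution(DistributionCollection(2, Normal())).computeSurvivalFunction(Point(2, 0.0))
   returns 0.25. */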
755
756 Point DistributionImplementation::computeInverseSurvivalFunction(const Scalar prob) const
757 {
758 Scalar marginalProb = 0.0;
759 return computeInverseSurvivalFunction(prob, marginalProb);
760 }
761
762 Point DistributionImplementation::computeInverseSurvivalFunction(const Scalar prob,
763 Scalar & marginalProb) const
764 {
765 // Special case for boundary values
766 marginalProb = prob;
767 if (prob < 0.0) return getRange().getUpperBound();
768 if (prob >= 1.0) return getRange().getLowerBound();
769 // Special case for dimension 1
770 if (dimension_ == 1) return Point(1, computeScalarQuantile(prob, true));
771 // Special case for independent copula
772 if (hasIndependentCopula())
773 {
774 Point result(dimension_);
775 marginalProb = std::pow(prob, 1.0 / dimension_);
776 for (UnsignedInteger i = 0; i < dimension_; ++i) result[i] = getMarginal(i).computeScalarQuantile(marginalProb, true);
777 return result;
778 }
779 // For elliptical distributions,
780 // P(X_1-mu_1<=x_1,...,X_d-mu_d<=x_d)=P(X_1-mu_1>=-x_1,...,X_d-mu_d>=-x_d)
781 // So
782 // P(X_1>=x_1,...,X_d>=x_d)=P(X_1<=2mu_1-x_1,...,X_d<=2mu_d-x_d)
783 // So
784 // InverseSurvivalFunction(q) = 2mu-Quantile(q)
785 if (isElliptical()) return getMean() * 2.0 - computeQuantile(prob, false, marginalProb);
786 // Extract the marginal distributions
787 Collection<Implementation> marginals(dimension_);
788 for (UnsignedInteger i = 0; i < dimension_; i++) marginals[i] = getMarginal(i).getImplementation();
789 // The n-D inverse survival function is defined as X(\tau) = (S_1^{-1}(\tau), ..., S_n^{-1}(\tau)),
790 // with tau such as S(X(\tau)) = q.
791 // As F(x) = C(F_1(x_1),...,F_n(x_n)), the constraint F(X(\tau)) = q reads:
792 // C(\tau,...,\tau) = q
793 // Bracketing of \tau using the Frechet Hoeffding bounds:
794 // max(n\tau - n + 1, 0) <= C(\tau,...,\tau) <= \tau
795 // from which we deduce that q <= \tau and \tau <= 1 - (1 - q) / n
796 // Lower bound of the bracketing interval
797 const SurvivalFunctionWrapper wrapper(marginals, this);
798 const Function f(bindMethod<SurvivalFunctionWrapper, Point, Point>(wrapper, &SurvivalFunctionWrapper::computeDiagonal, 1, 1));
799 Scalar leftTau = prob;
800 Scalar leftSurvival = f(Point(1, leftTau))[0];
801 // Due to numerical precision issues, the theoretical bound can be slightly violated
802 if (leftSurvival > prob)
803 {
804 leftTau = 0.0;
805 leftSurvival = 1.0;
806 }
807 // Upper bound of the bracketing interval
808 Scalar rightTau = 1.0 - (1.0 - prob) / dimension_;
809 Scalar rightSurvival = f(Point(1, rightTau))[0];
810 // Due to numerical precision issues, the theoretical bound can be slightly violated
811 if (rightSurvival < prob)
812 {
813 rightTau = 1.0;
814 rightSurvival = 0.0;
815 }
816 LOGDEBUG(OSS() << "DistributionImplementation::computeInverseSurvivalFunction: dimension=" << dimension_ << ", prob=" << prob << ", leftTau=" << leftTau << ", leftSurvival=" << leftSurvival << ", rightTau=" << rightTau << ", rightSurvival=" << rightSurvival);
817 // Use Brent's method to compute the quantile efficiently for continuous distributions
818 const Brent solver(quantileEpsilon_, cdfEpsilon_, cdfEpsilon_, quantileIterations_);
819 marginalProb = solver.solve(f, prob, leftTau, rightTau, leftSurvival, rightSurvival);
820 LOGINFO(OSS(false) << "tau=" << marginalProb);
821 return wrapper.diagonalToSpace(marginalProb);
822 }
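/* Worked special case: with an independent copula in dimension n the common
   marginal survival level tau satisfies tau^n = prob, hence tau = prob^(1/n);
   this is the closed form used in the shortcut above, while the general case
   brackets tau with the Frechet-Hoeffding bounds and solves for it with
   Brent's method. */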
823
824 /* On a Sample */
825 Sample DistributionImplementation::computeCDFSequential(const Sample & inSample) const
826 {
827 const UnsignedInteger size = inSample.getSize();
828 Sample outSample(size, 1);
829 for (UnsignedInteger i = 0; i < size; ++ i) outSample(i, 0) = computeCDF(inSample[i]);
830 return outSample;
831 }
832
833 struct ComputeCDFPolicy
834 {
835 const Sample & input_;
836 Sample & output_;
837 const DistributionImplementation & distribution_;
838
839 ComputeCDFPolicy( const Sample & input,
840 Sample & output,
841 const DistributionImplementation & distribution)
842 : input_(input)
843 , output_(output)
844 , distribution_(distribution)
845 {}
846
847 inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
848 {
849 for (UnsignedInteger i = r.begin(); i != r.end(); ++ i) output_(i, 0) = distribution_.computeCDF(input_[i]);
850 }
851
852 }; /* end struct ComputeCDFPolicy */
853
854 Sample DistributionImplementation::computeCDFParallel(const Sample & inSample) const
855 {
856 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
857 const UnsignedInteger size = inSample.getSize();
858 Sample result(size, 1);
859 const ComputeCDFPolicy policy( inSample, result, *this );
860 TBB::ParallelFor( 0, size, policy );
861 return result;
862 }
863
864 Sample DistributionImplementation::computeCDF(const Sample & inSample) const
865 {
866 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
867 if (isParallel_) return computeCDFParallel(inSample);
868 else return computeCDFSequential(inSample);
869 }
870
871 Sample DistributionImplementation::computeComplementaryCDFSequential(const Sample & inSample) const
872 {
873 const UnsignedInteger size = inSample.getSize();
874 Sample outSample(size, 1);
875 for (UnsignedInteger i = 0; i < size; ++ i) outSample(i, 0) = computeComplementaryCDF(inSample[i]);
876 return outSample;
877 }
878
879 struct ComputeComplementaryCDFPolicy
880 {
881 const Sample & input_;
882 Sample & output_;
883 const DistributionImplementation & distribution_;
884
885 ComputeComplementaryCDFPolicy( const Sample & input,
886 Sample & output,
887 const DistributionImplementation & distribution)
888 : input_(input)
889 , output_(output)
890 , distribution_(distribution)
891 {}
892
893 inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
894 {
895 for (UnsignedInteger i = r.begin(); i != r.end(); ++ i) output_(i, 0) = distribution_.computeComplementaryCDF(input_[i]);
896 }
897
898 }; /* end struct ComputeComplementaryCDFPolicy */
899
900 Sample DistributionImplementation::computeComplementaryCDFParallel(const Sample & inSample) const
901 {
902 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
903 const UnsignedInteger size = inSample.getSize();
904 Sample result(size, 1);
905 const ComputeComplementaryCDFPolicy policy( inSample, result, *this );
906 TBB::ParallelFor( 0, size, policy );
907 return result;
908 }
909
910 Sample DistributionImplementation::computeComplementaryCDF(const Sample & inSample) const
911 {
912 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
913 if (isParallel_) return computeComplementaryCDFParallel(inSample);
914 else return computeComplementaryCDFSequential(inSample);
915 }
916
917 Sample DistributionImplementation::computeSurvivalFunctionSequential(const Sample & inSample) const
918 {
919 const UnsignedInteger size = inSample.getSize();
920 Sample outSample(size, 1);
921 for (UnsignedInteger i = 0; i < size; ++ i) outSample(i, 0) = computeSurvivalFunction(inSample[i]);
922 return outSample;
923 }
924
925 struct ComputeSurvivalFunctionPolicy
926 {
927 const Sample & input_;
928 Sample & output_;
929 const DistributionImplementation & distribution_;
930
931 ComputeSurvivalFunctionPolicy( const Sample & input,
932 Sample & output,
933 const DistributionImplementation & distribution)
934 : input_(input)
935 , output_(output)
936 , distribution_(distribution)
937 {}
938
939 inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
940 {
941 for (UnsignedInteger i = r.begin(); i != r.end(); ++i) output_(i, 0) = distribution_.computeSurvivalFunction(input_[i]);
942 }
943
944 }; /* end struct ComputeSurvivalFunctionPolicy */
945
946 Sample DistributionImplementation::computeSurvivalFunctionParallel(const Sample & inSample) const
947 {
948 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
949 const UnsignedInteger size = inSample.getSize();
950 Sample result(size, 1);
951 const ComputeSurvivalFunctionPolicy policy( inSample, result, *this );
952 TBB::ParallelFor( 0, size, policy );
953 return result;
954 }
955
956 Sample DistributionImplementation::computeSurvivalFunction(const Sample & inSample) const
957 {
958 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
959 if (isParallel_) return computeSurvivalFunctionParallel(inSample);
960 else return computeSurvivalFunctionSequential(inSample);
961 }
962
963 /* Compute the probability content of an interval */
964 Scalar DistributionImplementation::computeProbability(const Interval & interval) const
965 {
966 if (interval.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: expected an interval of dimension=" << dimension_ << ", got dimension=" << interval.getDimension();
967 // Empty interval, quick check. More checks will be done in the refined algorithms
968 if (interval.isEmpty()) return 0.0;
969 // Generic implementation for univariate distributions
970 if (dimension_ == 1)
971 {
972 const Bool finiteA = interval.getFiniteLowerBound()[0] == 1;
973 const Bool finiteB = interval.getFiniteUpperBound()[0] == 1;
974 if (finiteA)
975 {
976 const Scalar a = interval.getLowerBound()[0];
977 const Scalar ccdfA = computeComplementaryCDF(a);
978 if (finiteB)
979 {
980 // [a, b]
981 const Scalar b = interval.getUpperBound()[0];
982 if (ccdfA <= 0.5)
983 {
984 const Scalar ccdfB = computeComplementaryCDF(b);
985 return ccdfA - ccdfB;
986 }
987 const Scalar cdfA = computeCDF(a);
988 const Scalar cdfB = computeCDF(b);
989 return cdfB - cdfA;
990 }
991 // [a,+inf)
992 return ccdfA;
993 }
994 // (-inf, b]
995 if (finiteB) return computeCDF(interval.getUpperBound()[0]);
996 // (-inf, +inf)
997 return 1.0;
998 }
999 // Generic implementation for continuous distributions
1000 if (isContinuous()) return computeProbabilityContinuous(interval);
1001 // Generic implementation for discrete distributions
1002 if (isDiscrete()) return computeProbabilityDiscrete(interval);
1003 // Generic implementation for general distributions
1004 return computeProbabilityGeneral(interval);
1005 }
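/* Illustrative usage sketch (a sketch, not a library test):

     const Normal standard(0.0, 1.0);
     const Scalar p = standard.computeProbability(Interval(-1.0, 1.0));
     // p ~ 0.6827, i.e. P(-1 < X <= 1) for X ~ N(0, 1)

   In the generic implementation above, 1D intervals are handled through the
   CDF/complementary CDF, while multidimensional intervals dispatch to
   computeProbabilityContinuous, computeProbabilityDiscrete or
   computeProbabilityGeneral below. */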
1006
1007 /* Get the probability content of an interval, continuous case */
1008 Scalar DistributionImplementation::computeProbabilityContinuous(const Interval & interval) const
1009 {
1010 const Interval reducedInterval(interval.intersect(getRange()));
1011 if (reducedInterval.isEmpty()) return 0.0;
1012 if (reducedInterval == getRange()) return 1.0;
1013 // Use adaptive multidimensional integration of the PDF on the reduced interval
1014 const PDFWrapper pdfWrapper(this);
1015 Scalar probability = 0.0;
1016 if (dimension_ == 1)
1017 {
1018 Scalar error = -1.0;
1019 const Point singularities(getSingularities());
1020 // If no singularity inside of the given reduced interval
1021 const UnsignedInteger singularitiesNumber = singularities.getSize();
1022 const Scalar lower = reducedInterval.getLowerBound()[0];
1023 const Scalar upper = reducedInterval.getUpperBound()[0];
1024 if (singularitiesNumber == 0 || singularities[0] >= upper || singularities[singularitiesNumber - 1] <= lower) probability = GaussKronrod().integrate(pdfWrapper, reducedInterval, error)[0];
1025 else
1026 {
1027 Scalar a = lower;
1028 for (UnsignedInteger i = 0; i < singularitiesNumber; ++i)
1029 {
1030 const Scalar b = singularities[i];
1031 if (b > lower && b < upper)
1032 {
1033 probability += GaussKronrod().integrate(pdfWrapper, Interval(a, b), error)[0];
1034 a = b;
1035 }
1036 // Exit the loop if no more singularities inside of the reduced interval
1037 if (b >= upper) break;
1038 } // for
1039 // Last contribution
1040 probability += GaussKronrod().integrate(pdfWrapper, Interval(a, upper), error)[0];
1041 } // else
1042 } // dimension_ == 1
1043 else
1044 {
1045 if (hasIndependentCopula())
1046 {
1047 const Point lower(interval.getLowerBound());
1048 const Point upper(interval.getUpperBound());
1049 probability = 1.0;
1050 for (UnsignedInteger i = 0; i < dimension_; ++i) probability *= getMarginal(i).computeProbability(Interval(lower[i], upper[i]));
1051 }
1052 else
1053 {
1054 probability = IteratedQuadrature().integrate(pdfWrapper, reducedInterval)[0];
1055 }
1056 } // dimension > 1
1057 return std::min(1.0, std::max(0.0, probability));
1058 }
1059
1060 /* Get the probability content of an interval, discrete case */
1061 Scalar DistributionImplementation::computeProbabilityDiscrete(const Interval & interval) const
1062 {
1063 const Sample support(getSupport(interval));
1064 Scalar value = 0.0;
1065 for (UnsignedInteger i = 0; i < support.getSize(); ++i) value += computePDF(support[i]);
1066 return value;
1067 }
1068
1069 /* Get the probability content of an interval, general case */
1070 Scalar DistributionImplementation::computeProbabilityGeneral(const Interval & interval) const
1071 {
1072 const Interval reducedInterval(interval.intersect(getRange()));
1073 if (reducedInterval.isEmpty()) return 0.0;
1074 if (reducedInterval == getRange()) return 1.0;
1075 // P(\bigcap_i a_i < X_i \leq b_i) = \sum_c (-1)^{n(c)} F(c_1,c_2,...,c_n)
1076 // with c = (c_i, i = 1, ..., n), c_i \in \{a_i, b_i\}
1077 // and n(c) = Card(\{i | c_i = a_i, i = 1, ..., n\})
1078 Scalar probability = 0.0;
1079 const Point a(reducedInterval.getLowerBound());
1080 const Point b(reducedInterval.getUpperBound());
1081 const UnsignedInteger iMax = 1 << dimension_;
1082 for( UnsignedInteger i = 0; i < iMax; ++i )
1083 {
1084 Bool evenLower = true;
1085 Point c(b);
1086 for( UnsignedInteger j = 0; j < dimension_; ++j )
1087 {
1088 const UnsignedInteger mask = 1 << j;
1089 if (i & mask)
1090 {
1091 c[j] = a[j];
1092 evenLower = (!evenLower);
1093 }
1094 } // j
1095 const Scalar cdf = computeCDF(c);
1096 probability += (evenLower ? cdf : -cdf);
1097 } // i
1098 return probability;
1099 }
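/* For instance, in dimension 2 the sum over the 2^2 corners reduces to
     P(a1 < X1 <= b1, a2 < X2 <= b2) = F(b1, b2) - F(a1, b2) - F(b1, a2) + F(a1, a2). */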
1100
1101
1102 /* Get the characteristic function of the distribution, i.e. phi(u) = E(exp(I*u*X)) */
1103 Complex DistributionImplementation::computeCharacteristicFunction(const Scalar x) const
1104 {
1105 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "Error: cannot use the computeCharacteristicFunction method with distributions of dimension > 1";
1106 if (x == 0.0) return 1.0;
1107 Complex value(0.0);
1108 // In the continuous case, we use simple Gauss integration with a fixed number of integration points. We divide the interval so as to have a sufficient number of integration points per sub-interval. This works well for low to moderate values of x, but is prohibitive for large x. In that case, we use Filon's method with linear interpolation, i.e. the modified trapezoidal rule as in E. O. Tuck, 'A simple "Filon-Trapezoidal" Rule'.
1109 if (isContinuous())
1110 {
1111 const UnsignedInteger N = ResourceMap::GetAsUnsignedInteger("Distribution-CharacteristicFunctionNMax");
1112 // The circular function has x(b-a)/(2\pi) arches over [a, b], so we need a number of points of this order; we take 8 points per arch
1113 Point legendreWeights;
1114 const Point legendreNodes(getGaussNodesAndWeights(legendreWeights));
1115 // How many sub-intervals?
1116 // nPts = 8*x(b-a)/2\pi => (b-a)/2 = nPts * \pi / (8*x)
1117 const Scalar xMin = getRange().getLowerBound()[0];
1118 const Scalar xMax = getRange().getUpperBound()[0];
1119 const Scalar delta = xMax - xMin;
1120 const UnsignedInteger intervalsNumber = std::max(1, static_cast<int>(round(2 * x * delta / integrationNodesNumber_)));
1121 if (intervalsNumber * integrationNodesNumber_ < N)
1122 {
1123 const Scalar halfLength = 0.5 * delta / intervalsNumber;
1124 for (UnsignedInteger n = 0; n < intervalsNumber; ++n)
1125 {
1126 const Scalar a = xMin + 2.0 * n * halfLength;
1127 for (UnsignedInteger i = 0; i < integrationNodesNumber_; ++i)
1128 {
1129 const Scalar xi = a + (1.0 + legendreNodes[i]) * halfLength;
1130 value += legendreWeights[i] * computePDF(xi) * std::exp(Complex(0.0, x * xi));
1131 }
1132 }
1133 // We factor out the scaling as all the sub intervals have the same length
1134 value *= halfLength;
1135 }
1136 else
1137 {
1138 const Scalar a = getRange().getLowerBound()[0];
1139 const Scalar b = getRange().getUpperBound()[0];
1140 const Scalar T = 0.5 * (b - a);
1141 const Scalar c = 0.5 * (a + b);
1142 const Scalar dt = T / N;
1143 static Point pdfGrid;
1144 if (!pdfGrid.getSize())
1145 {
1146 Sample locations(Box(Indices(1, 2 * N - 1)).generate());
1147 locations *= Point(1, b - a);
1148 locations += Point(1, a);
1149 pdfGrid = computePDF(locations).getImplementation()->getData();
1150 }
1151 const Scalar omegaDt = x * dt;
1152 const Scalar omegaDt2 = omegaDt * omegaDt;
1153 const Scalar cosOmegaDt = std::cos(omegaDt);
1154 const Scalar sinOmegaDt = std::sin(omegaDt);
1155 // The bound 4.3556e-4 is such that we get full double precision
1156 const Complex wM(std::abs(omegaDt) < 4.3556e-4 ? Complex(0.5 - omegaDt2 / 24.0, omegaDt / 6.0 * (1.0 - omegaDt2 / 40.0)) : Complex((1.0 - cosOmegaDt) / omegaDt2, (omegaDt - sinOmegaDt) / omegaDt2));
1157 const Complex wP(std::abs(omegaDt) < 4.3556e-4 ? Complex(0.5 - omegaDt2 / 24.0, -omegaDt / 6.0 * (1.0 - omegaDt2 / 40.0)) : Complex((1.0 - cosOmegaDt) / omegaDt2, (-omegaDt + sinOmegaDt) / omegaDt2));
1158 const Scalar cosNOmegaDt = std::cos(N * omegaDt);
1159 const Scalar sinNOmegaDt = std::sin(N * omegaDt);
1160 // The bound 4.3556e-4 is such that we get full double precision
1161 const Scalar w = std::abs(omegaDt) < 4.3556e-4 ? std::pow(std::sin(0.5 * omegaDt) / (0.5 * omegaDt), 2) : 1.0 - omegaDt2 / 12.0;
1162 // value = pdfGrid[N] * w + pdfGrid[0] * wM * Complex(cosNOmegaDt, -sinNOmegaDt) + pdfGrid[2 * N] * wP * Complex(cosNOmegaDt, sinNOmegaDt);
1163 value = pdfGrid[0] * wM * Complex(cosNOmegaDt, -sinNOmegaDt) + pdfGrid[2 * N - 1] * wP * Complex(cosNOmegaDt, sinNOmegaDt);
1164 for (UnsignedInteger n = 1; n < N; ++n)
1165 {
1166 const Scalar cosN = std::cos(n * omegaDt);
1167 const Scalar sinN = std::sin(n * omegaDt);
1168 value += Complex(w * cosN * (pdfGrid[N + n - 1] + pdfGrid[N - n]), w * sinN * (pdfGrid[N + n - 1] - pdfGrid[N - n]));
1169 }
1170 return dt * value * Complex(std::cos(x * c), std::sin(x * c));
1171 }
1172 } // Continuous
1173 else
1174 {
1175 // Discrete
1176 // In the discrete case, we have a reasonably efficient algorithm both in terms of speed and precision.
1177 if (isDiscrete())
1178 {
1179 const Sample support(getSupport());
1180 const Point probabilities(getProbabilities());
1181 const UnsignedInteger size = support.getSize();
1182 for (UnsignedInteger i = 0; i < size; ++i)
1183 {
1184 const Scalar pt = support(i, 0);
1185 value += probabilities[i] * std::exp(Complex(0.0, x * pt));
1186 }
1187 }
1188 // In the composite case, no default algorithm
1189 else
1190 {
1191 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeCharacteristicFunction(const Scalar x) const: no default algorithm to compute the characteristic function in the composite case.";
1192 }
1193 } // !Continuous
1194 return value;
1195 }
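/* Illustrative usage sketch: in the discrete branch above the characteristic
   function is the finite sum phi(u) = \sum_k p_k exp(i u x_k) over the support,
   so for a Dirac distribution at the point a it reduces to exp(i u a):

     const Dirac d(Point(1, 2.0));
     const Complex phi(d.computeCharacteristicFunction(1.0));  // exp(2 i), modulus 1
*/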
1196
1197 Complex DistributionImplementation::computeCharacteristicFunction(const Point & x) const
1198 {
1199 if (dimension_ == 1) return computeCharacteristicFunction(x[0]);
1200 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeCharacteristicFunction(const Point & x) const";
1201 }
1202
1203 Complex DistributionImplementation::computeLogCharacteristicFunction(const Scalar x) const
1204 {
1205 const Complex value(computeCharacteristicFunction(x));
1206 const Complex result(std::log(value));
1207 return result;
1208 }
1209
1210 Complex DistributionImplementation::computeLogCharacteristicFunction(const Point & x) const
1211 {
1212 if (dimension_ == 1) return computeLogCharacteristicFunction(x[0]);
1213 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeLogCharacteristicFunction(const Point & x) const";
1214 }
1215
1216 Complex DistributionImplementation::computeCharacteristicFunction(const UnsignedInteger index,
1217 const Scalar step) const
1218 {
1219 return computeCharacteristicFunction(index * step);
1220 }
1221
1222 Complex DistributionImplementation::computeLogCharacteristicFunction(const UnsignedInteger index,
1223 const Scalar step) const
1224 {
1225 return computeLogCharacteristicFunction(index * step);
1226 }
1227
1228 Complex DistributionImplementation::computeCharacteristicFunction(const Indices & indices,
1229 const Point & step) const
1230 {
1231 Point point(dimension_);
1232 for (UnsignedInteger i = 0; i < dimension_; ++i) point[i] = indices[i] * step[i];
1233 return computeCharacteristicFunction(point);
1234 }
1235
1236 Complex DistributionImplementation::computeLogCharacteristicFunction(const Indices & indices,
1237 const Point & step) const
1238 {
1239 Point point(dimension_);
1240 for (UnsignedInteger i = 0; i < dimension_; ++i) point[i] = indices[i] * step[i];
1241 return computeLogCharacteristicFunction(point);
1242 }
1243
1244 /* Get the generating function of the distribution, i.e. psi(z) = E(z^X) */
1245 Scalar DistributionImplementation::computeGeneratingFunction(const Scalar z) const
1246 {
1247 return computeGeneratingFunction(Complex(z, 0.0)).real();
1248 }
1249
1250 Scalar DistributionImplementation::computeLogGeneratingFunction(const Scalar z) const
1251 {
1252 return computeLogGeneratingFunction(Complex(z, 0.0)).real();
1253 }
1254
1255 Complex DistributionImplementation::computeGeneratingFunction(const Complex & z) const
1256 {
1257 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "Error: cannot use the computeGeneratingFunction method with distributions of dimension > 1";
1258 if (!isDiscrete()) throw NotDefinedException(HERE) << "Error: cannot compute the generating function for non discrete distributions.";
1259 Complex value(0.0);
1260 // Create the generating function as a univariate polynomial. It will be used as such if the distribution is integral, or as a container for the individual probabilities if the distribution is not integral
1261 if (!isAlreadyCreatedGeneratingFunction_)
1262 {
1263 generatingFunction_ = UniVariatePolynomial(getProbabilities());
1264 isAlreadyCreatedGeneratingFunction_ = true;
1265 }
1266 // If the distribution is integral, the generating function is either a polynomial if the support is finite, or can be well approximated by such a polynomial
1267 if (isIntegral())
1268 {
1269 value = generatingFunction_(z);
1270 }
1271 // The distribution is discrete but not integral
1272 else
1273 {
1274 const Sample support(getSupport());
1275 const UnsignedInteger size = support.getSize();
1276 const Point probabilities(generatingFunction_.getCoefficients());
1277 for (UnsignedInteger i = 0; i < size; ++i)
1278 {
1279 const Scalar pt = support(i, 0);
1280 value += probabilities[i] * std::pow(z, pt);
1281 }
1282 }
1283 return value;
1284 }
1285
1286 Complex DistributionImplementation::computeLogGeneratingFunction(const Complex & z) const
1287 {
1288 Complex value = computeGeneratingFunction(z);
1289 return std::log(value);
1290 }
1291
1292 /* Compute the entropy of the distribution */
1293 Scalar DistributionImplementation::computeEntropy() const
1294 {
1295 if (isDiscrete())
1296 {
1297 const Point probabilities(getProbabilities());
1298 Scalar entropy = 0.0;
1299 for (UnsignedInteger i = 0; i < probabilities.getSize(); ++i)
1300 {
1301 const Scalar pI = probabilities[i];
1302 if (pI > 0.0)
1303 entropy += -pI * std::log(pI);
1304 }
1305 return entropy;
1306 }
1307 if (isContinuous())
1308 {
1309 // If the components are independent the entropy is the sum of the marginal entropies
1310 // We factor the construction of the 1D quadrature rules over the marginals
1311 const Point lowerBound_(range_.getLowerBound());
1312 const Point upperBound_(range_.getUpperBound());
1313 if (hasIndependentCopula())
1314 {
1315 const GaussLegendre integrator(Indices(1, integrationNodesNumber_));
1316 const Sample nodes(integrator.getNodes());
1317 const Point weights(integrator.getWeights());
1318 Scalar entropy = 0.0;
1319 for (UnsignedInteger marginal = 0; marginal < dimension_; ++marginal)
1320 {
1321 const Implementation marginalDistribution(getMarginal(marginal).getImplementation());
1322 Point integrationBounds(1, lowerBound_[marginal]);
1323 integrationBounds.add(getSingularities());
1324 integrationBounds.add(upperBound_[marginal]);
1325 for (UnsignedInteger j = 0; j < integrationBounds.getSize() - 1; ++j)
1326 {
1327 const Scalar a = integrationBounds[j];
1328 const Scalar b = integrationBounds[j + 1];
1329 const Scalar delta = b - a;
1330 const Point logPDFAtNodes(marginalDistribution->computeLogPDF(nodes * Point(1, delta) + Point(1, a)).asPoint());
1331 for (UnsignedInteger i = 0; i < integrationNodesNumber_; ++i)
1332 {
1333 const Scalar logPI = logPDFAtNodes[i];
1334 entropy += -logPI * std::exp(logPI) * weights[i] * delta;
1335 } // integration nodes
1336 } // Singularities
1337 } // marginal
1338 return entropy;
1339 } // hasIndependentCopula()
1340 // In low dimension, use an adaptive quadrature
1341 if (dimension_ <= ResourceMap::GetAsUnsignedInteger("Distribution-SmallDimensionEntropy"))
1342 {
1343 const EntropyKernel entropyKernel(this);
1344 return IteratedQuadrature().integrate(entropyKernel, getRange())[0];
1345 } // Low dimension
1346 } // isContinuous()
1347 const UnsignedInteger size = ResourceMap::GetAsUnsignedInteger("Distribution-EntropySamplingSize");
1348 const String samplingMethod(ResourceMap::GetAsString("Distribution-EntropySamplingMethod"));
1349 Sample sample;
1350 if (samplingMethod == "MonteCarlo")
1351 sample = getSample(size);
1352 else if (samplingMethod == "QuasiMonteCarlo")
1353 sample = getSampleByQMC(size);
1354 else
1355 {
1356 LOGWARN(OSS() << "Unknown sampling method=" << samplingMethod << " to compute entropy. Resort to MonteCarlo");
1357 sample = getSample(size);
1358 }
1359 return -computeLogPDF(sample).computeMean()[0];
1360 }
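/* Usage sketch (illustrative only): computeEntropy() returns -E[log p(X)], computed exactly for
   discrete supports, by quadrature for continuous distributions, and by (quasi) Monte Carlo
   sampling otherwise. Assuming the Distribution facade forwards the method:

     #include "openturns/Normal.hxx"
     using namespace OT;
     const Normal n(0.0, 1.0);
     const Scalar h = n.computeEntropy(); // closed form: 0.5 * log(2 * pi * e) ~ 1.41894
*/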
1361
1362 /* Get the DDF of the distribution */
1363 Sample DistributionImplementation::computeDDFSequential(const Sample & inSample) const
1364 {
1365 const UnsignedInteger size = inSample.getSize();
1366 SampleImplementation outSample(size, dimension_);
1367 UnsignedInteger shift = 0;
1368 for (UnsignedInteger i = 0; i < size; ++i)
1369 {
1370 const Point point(computeDDF(inSample[i]));
1371 std::copy(point.begin(), point.end(), outSample.data_begin() + shift);
1372 shift += dimension_;
1373 }
1374 return outSample;
1375 }
1376
1377 /* Get the DDF of the distribution */
1378 struct ComputeDDFPolicy
1379 {
1380 const Sample & input_;
1381 Sample & output_;
1382 const DistributionImplementation & distribution_;
1383
1384 ComputeDDFPolicy( const Sample & input,
1385 Sample & output,
1386 const DistributionImplementation & distribution)
1387 : input_(input)
1388 , output_(output)
1389 , distribution_(distribution)
1390 {}
1391
1392 inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
1393 {
1394 const UnsignedInteger dimension = input_.getDimension();
1395 UnsignedInteger shift = dimension * r.begin();
1396 for (UnsignedInteger i = r.begin(); i != r.end(); ++i)
1397 {
1398 const Point point(distribution_.computeDDF(input_[i]));
1399 std::copy(point.begin(), point.end(), output_.getImplementation()->data_begin() + shift);
1400 shift += dimension;
1401 }
1402 }
1403 }; /* end struct ComputeDDFPolicy */
1404
1405 Sample DistributionImplementation::computeDDFParallel(const Sample & inSample) const
1406 {
1407 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
1408 const UnsignedInteger size = inSample.getSize();
1409 Sample result(size, dimension_);
1410 const ComputeDDFPolicy policy( inSample, result, *this );
1411 TBB::ParallelFor( 0, size, policy );
1412 return result;
1413 }
1414
1415 Sample DistributionImplementation::computeDDF(const Sample & inSample) const
1416 {
1417 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
1418 if (isParallel_) return computeDDFParallel(inSample);
1419 else return computeDDFSequential(inSample);
1420 }
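/* Usage sketch (illustrative only): the Sample overloads above merely dispatch between the TBB
   policy (parallel) and the plain loop (sequential); both fill the output row by row. Assuming
   the Distribution facade forwards the Sample overload:

     #include "openturns/Normal.hxx"
     using namespace OT;
     const Normal n;
     const Sample x(n.getSample(5));
     const Sample ddf(n.computeDDF(x)); // row i equals n.computeDDF(x[i])
*/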
1421
1422 /* Get the PDF of the distribution */
1423 Sample DistributionImplementation::computePDFSequential(const Sample & inSample) const
1424 {
1425 LOGDEBUG("In DistributionImplementation::computePDFSequential(const Sample & inSample)");
1426 const UnsignedInteger size = inSample.getSize();
1427 SampleImplementation outSample(size, 1);
1428 for (UnsignedInteger i = 0; i < size; ++i) outSample(i, 0) = computePDF(inSample[i]);
1429 return outSample;
1430 }
1431
1432 /* Get the PDF of the distribution */
1433 struct ComputePDFPolicy
1434 {
1435 const Sample & input_;
1436 Sample & output_;
1437 const DistributionImplementation & distribution_;
1438
1439 ComputePDFPolicy( const Sample & input,
1440 Sample & output,
1441 const DistributionImplementation & distribution)
1442 : input_(input)
1443 , output_(output)
1444 , distribution_(distribution)
1445 {}
1446
1447 inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
1448 {
1449 for (UnsignedInteger i = r.begin(); i != r.end(); ++i) output_(i, 0) = distribution_.computePDF(input_[i]);
1450 }
1451
1452 }; /* end struct ComputePDFPolicy */
1453
1454 Sample DistributionImplementation::computePDFParallel(const Sample & inSample) const
1455 {
1456 LOGDEBUG("In DistributionImplementation::computePDFParallel(const Sample & inSample)");
1457 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
1458 const UnsignedInteger size = inSample.getSize();
1459 Sample result(size, 1);
1460 const ComputePDFPolicy policy( inSample, result, *this );
1461 TBB::ParallelFor( 0, size, policy );
1462 return result;
1463 }
1464
1465 Sample DistributionImplementation::computePDF(const Sample & inSample) const
1466 {
1467 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
1468 if (isParallel_) return computePDFParallel(inSample);
1469 else return computePDFSequential(inSample);
1470 }
1471
1472 /* Get the Log PDF of the distribution */
1473 Sample DistributionImplementation::computeLogPDFSequential(const Sample & inSample) const
1474 {
1475 const UnsignedInteger size = inSample.getSize();
1476 Sample outSample(size, 1);
1477 for (UnsignedInteger i = 0; i < size; ++i) outSample(i, 0) = computeLogPDF(inSample[i]);
1478 return outSample;
1479 }
1480
1481 /* Get the LogPDF of the distribution */
1482 struct ComputeLogPDFPolicy
1483 {
1484 const Sample & input_;
1485 Sample & output_;
1486 const DistributionImplementation & distribution_;
1487
1488 ComputeLogPDFPolicy( const Sample & input,
1489 Sample & output,
1490 const DistributionImplementation & distribution)
1491 : input_(input)
1492 , output_(output)
1493 , distribution_(distribution)
1494 {}
1495
1496 inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
1497 {
1498 for (UnsignedInteger i = r.begin(); i != r.end(); ++i) output_(i, 0) = distribution_.computeLogPDF(input_[i]);
1499 }
1500
1501 }; /* end struct ComputeLogPDFPolicy */
1502
1503 Sample DistributionImplementation::computeLogPDFParallel(const Sample & inSample) const
1504 {
1505 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
1506 const UnsignedInteger size = inSample.getSize();
1507 Sample result(size, 1);
1508 const ComputeLogPDFPolicy policy( inSample, result, *this );
1509 TBB::ParallelFor( 0, size, policy );
1510 return result;
1511 }
1512
1513 Sample DistributionImplementation::computeLogPDF(const Sample & inSample) const
1514 {
1515 if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. Expect a dimension " << dimension_ << ", got " << inSample.getDimension();
1516 if (isParallel_) return computeLogPDFParallel(inSample);
1517 else return computeLogPDFSequential(inSample);
1518 }
1519
1520 /* Get the DDF of the distribution */
1521 Scalar DistributionImplementation::computeDDF(const Scalar scalar) const
1522 {
1523 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "ERROR: cannot use the simplified interface of computeDDF with distributions of dimension > 1";
1524 return computeDDF(Point(1, scalar))[0];
1525 }
1526
1527 /* Get the PDF of the distribution */
1528 Scalar DistributionImplementation::computePDF(const Scalar scalar) const
1529 {
1530 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "ERROR: cannot use the simplified interface of computePDF with distributions of dimension > 1";
1531 return computePDF(Point(1, scalar));
1532 }
1533
1534 Scalar DistributionImplementation::computeLogPDF(const Scalar scalar) const
1535 {
1536 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "ERROR: cannot use the simplified interface of computeLogPDF with distributions of dimension > 1";
1537 return computeLogPDF(Point(1, scalar));
1538 }
1539
1540 /* Get the CDF of the distribution */
1541 Scalar DistributionImplementation::computeCDF(const Scalar scalar) const
1542 {
1543 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "ERROR: cannot use the simplified interface of computeCDF with distributions of dimension > 1";
1544 return computeCDF(Point(1, scalar));
1545 }
1546
1547 Scalar DistributionImplementation::computeComplementaryCDF(const Scalar scalar) const
1548 {
1549 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "ERROR: cannot use the simplified interface of computeComplementaryCDF with distributions of dimension > 1";
1550 return computeComplementaryCDF(Point(1, scalar));
1551 }
1552
1553 Scalar DistributionImplementation::computeSurvivalFunction(const Scalar scalar) const
1554 {
1555 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "ERROR: cannot use the simplified interface of computeSurvivalFunction with distributions of dimension > 1";
1556 return computeSurvivalFunction(Point(1, scalar));
1557 }
1558
1559 /* Compute the PDF of 1D distributions over a regular grid */
1560 Sample DistributionImplementation::computePDF(const Scalar xMin,
1561 const Scalar xMax,
1562 const UnsignedInteger pointNumber,
1563 Sample & grid) const
1564 {
1565 return computePDF(Point(1, xMin), Point(1, xMax), Indices(1, pointNumber), grid);
1566 }
1567
1568 /* Compute the PDF of nD distributions over a regular grid */
1569 Sample DistributionImplementation::computePDF(const Point & xMin,
1570 const Point & xMax,
1571 const Indices & pointNumber,
1572 Sample & grid) const
1573 {
1574 if (xMin.getDimension() != xMax.getDimension()) throw InvalidArgumentException(HERE) << "Error: the two corner points must have the same dimension. Here, dim(xMin)=" << xMin.getDimension() << " and dim(xMax)=" << xMax.getDimension();
1575 if (xMin.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the corner points must have the same dimension as the distribution. Here, dim(xMin)=" << xMin.getDimension() << " and distribution dimension=" << dimension_;
1576 if (dimension_ != pointNumber.getSize()) throw InvalidArgumentException(HERE) << "Error: the discretization must match the distribution dimension. Here, dim(discretization)=" << pointNumber.getSize() << " and distribution dimension=" << dimension_;
1577 IndicesCollection indices(Tuples(pointNumber).generate());
1578 const UnsignedInteger size = indices.getSize();
1579 Sample inputSample(indices.getSize(), dimension_);
1580 for (UnsignedInteger i = 0; i < size; ++i)
1581 for (UnsignedInteger j = 0; j < dimension_; ++j) inputSample(i, j) = xMin[j] + indices(i, j) * (xMax[j] - xMin[j]) / (pointNumber[j] - 1.0);
1582 grid = inputSample;
1583 return computePDF(inputSample);
1584 }
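/* Usage sketch (illustrative only): the regular grid is the Cartesian product of the
   per-component abscissae x_j(k) = xMin[j] + k * (xMax[j] - xMin[j]) / (pointNumber[j] - 1),
   enumerated by Tuples::generate(). Assuming the Distribution facade forwards the 1D overload:

     #include "openturns/Normal.hxx"
     using namespace OT;
     const Normal n;
     Sample grid;
     const Sample pdf(n.computePDF(-3.0, 3.0, 101, grid)); // grid receives the 101 abscissae
*/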
1585
1586 /* Compute the log-PDF of 1D distributions over a regular grid */
1587 Sample DistributionImplementation::computeLogPDF(const Scalar xMin,
1588 const Scalar xMax,
1589 const UnsignedInteger pointNumber,
1590 Sample & grid) const
1591 {
1592 return computeLogPDF(Point(1, xMin), Point(1, xMax), Indices(1, pointNumber), grid);
1593 }
1594
1595 /* Compute the log-PDF of nD distributions over a regular grid */
1596 Sample DistributionImplementation::computeLogPDF(const Point & xMin,
1597 const Point & xMax,
1598 const Indices & pointNumber,
1599 Sample & grid) const
1600 {
1601 if (xMin.getDimension() != xMax.getDimension()) throw InvalidArgumentException(HERE) << "Error: the two corner points must have the same dimension. Here, dim(xMin)=" << xMin.getDimension() << " and dim(xMax)=" << xMax.getDimension();
1602 if (xMin.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the corner points must have the same dimension as the distribution. Here, dim(xMin)=" << xMin.getDimension() << " and distribution dimension=" << dimension_;
1603 if (dimension_ != pointNumber.getSize()) throw InvalidArgumentException(HERE) << "Error: the discretization must match the distribution dimension. Here, dim(discretization)=" << pointNumber.getSize() << " and distribution dimension=" << dimension_;
1604 IndicesCollection indices(Tuples(pointNumber).generate());
1605 const UnsignedInteger size = indices.getSize();
1606 Sample inputSample(indices.getSize(), dimension_);
1607 for (UnsignedInteger i = 0; i < size; ++i)
1608 for (UnsignedInteger j = 0; j < dimension_; ++j) inputSample(i, j) = xMin[j] + indices(i, j) * (xMax[j] - xMin[j]) / (pointNumber[j] - 1.0);
1609 grid = inputSample;
1610 return computeLogPDF(inputSample);
1611 }
1612
1613 /* Compute the CDF of 1D distributions over a regular grid */
1614 Sample DistributionImplementation::computeCDF(const Scalar xMin,
1615 const Scalar xMax,
1616 const UnsignedInteger pointNumber,
1617 Sample & grid) const
1618 {
1619 return computeCDF(Point(1, xMin), Point(1, xMax), Indices(1, pointNumber), grid);
1620 }
1621
1622 /* Compute the CDF of nD distributions over a regular grid */
1623 Sample DistributionImplementation::computeCDF(const Point & xMin,
1624 const Point & xMax,
1625 const Indices & pointNumber,
1626 Sample & grid) const
1627 {
1628 if (xMin.getDimension() != xMax.getDimension()) throw InvalidArgumentException(HERE) << "Error: the two corner points must have the same dimension. Here, dim(xMin)=" << xMin.getDimension() << " and dim(xMax)=" << xMax.getDimension();
1629 if (xMin.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the corner points must have the same dimension as the distribution. Here, dim(xMin)=" << xMin.getDimension() << " and distribution dimension=" << dimension_;
1630 if (dimension_ != pointNumber.getSize()) throw InvalidArgumentException(HERE) << "Error: the discretization must match the distribution dimension. Here, dim(discretization)=" << pointNumber.getSize() << " and distribution dimension=" << dimension_;
1631 IndicesCollection indices(Tuples(pointNumber).generate());
1632 const UnsignedInteger size = indices.getSize();
1633 Sample inputSample(indices.getSize(), dimension_);
1634 for (UnsignedInteger i = 0; i < size; ++i)
1635 for (UnsignedInteger j = 0; j < dimension_; ++j) inputSample(i, j) = xMin[j] + indices(i, j) * (xMax[j] - xMin[j]) / (pointNumber[j] - 1.0);
1636 grid = inputSample;
1637 return computeCDF(inputSample);
1638 }
1639
1640 Sample DistributionImplementation::computeComplementaryCDF(const Scalar xMin,
1641 const Scalar xMax,
1642 const UnsignedInteger pointNumber,
1643 Sample & grid) const
1644 {
1645 if (dimension_ != 1) throw InvalidArgumentException(HERE) << "Error: cannot compute the complementary CDF over a regular 1D grid if the dimension is > 1";
1646 Sample result(pointNumber, 2);
1647 Scalar x = xMin;
1648 Scalar step = (xMax - xMin) / Scalar(pointNumber - 1.0);
1649 grid = Sample(pointNumber, 1);
1650 for (UnsignedInteger i = 0; i < pointNumber; ++i)
1651 {
1652 grid(i, 0) = x;
1653 result(i, 0) = x;
1654 result(i, 1) = computeComplementaryCDF(x);
1655 x += step;
1656 }
1657 return result;
1658 }
1659
1660 /* Compute the quantile over a regular grid */
1661 Sample DistributionImplementation::computeQuantile(const Scalar qMin,
1662 const Scalar qMax,
1663 const UnsignedInteger pointNumber,
1664 const Bool tail) const
1665 {
1666 Sample grid;
1667 return computeQuantile(qMin, qMax, pointNumber, grid, tail);
1668 }
1669
1670 Sample DistributionImplementation::computeQuantile(const Scalar qMin,
1671 const Scalar qMax,
1672 const UnsignedInteger pointNumber,
1673 Sample & grid,
1674 const Bool tail) const
1675 {
1676 // First, build the regular grid for the quantile levels
1677 grid = Sample(pointNumber, 1);
1678 for (UnsignedInteger i = 0; i < pointNumber; ++i) grid(i, 0) = qMin + i * (qMax - qMin) / (pointNumber - 1.0);
1679 // Use possible parallelization
1680 return computeQuantile(grid.getImplementation()->getData(), tail);
1681 }
1682
1683 /* Compute the quantile over a provided grid */
1684 Sample DistributionImplementation::computeQuantileSequential(const Point & prob,
1685 const Bool tail) const
1686 {
1687 const UnsignedInteger size = prob.getSize();
1688 SampleImplementation result(size, dimension_);
1689 UnsignedInteger shift = 0;
1690 for ( UnsignedInteger i = 0; i < size; ++ i )
1691 {
1692 const Point point(computeQuantile(prob[i], tail));
1693 std::copy(point.begin(), point.end(), result.data_begin() + shift);
1694 shift += dimension_;
1695 }
1696 return result;
1697 }
1698
1699 struct ComputeQuantilePolicy
1700 {
1701 const Point & prob_;
1702 Sample & output_;
1703 Bool tail_;
1704 const DistributionImplementation & distribution_;
1705
1706 ComputeQuantilePolicy( const Point & prob,
1707 Sample & output,
1708 const Bool tail,
1709 const DistributionImplementation & distribution)
1710 : prob_(prob)
1711 , output_(output)
1712 , tail_(tail)
1713 , distribution_(distribution)
1714 {}
1715
1716 inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
1717 {
1718 const UnsignedInteger dimension = distribution_.getDimension();
1719 UnsignedInteger shift = dimension * r.begin();
1720 for (UnsignedInteger i = r.begin(); i != r.end(); ++i)
1721 {
1722 const Point point(distribution_.computeQuantile(prob_[i], tail_));
1723 std::copy(point.begin(), point.end(), output_.getImplementation()->data_begin() + shift);
1724 shift += dimension;
1725 }
1726 }
1727
1728 }; /* end struct ComputeQuantilePolicy */
1729
1730 Sample DistributionImplementation::computeQuantileParallel(const Point & prob,
1731 const Bool tail) const
1732 {
1733 const UnsignedInteger size = prob.getSize();
1734 Sample result(size, dimension_);
1735 const ComputeQuantilePolicy policy( prob, result, tail, *this );
1736 TBB::ParallelFor( 0, size, policy );
1737 return result;
1738 }
1739
1740 Sample DistributionImplementation::computeQuantile(const Point & prob,
1741 const Bool tail) const
1742 {
1743 if (isParallel_) return computeQuantileParallel(prob, tail);
1744 else return computeQuantileSequential(prob, tail);
1745 }
1746
1747 /* Get the PDF gradient of the distribution */
1748 Point DistributionImplementation::computePDFGradient(const Point & point) const
1749 {
1750 const Point initialParameters(getParameter());
1751 const UnsignedInteger parametersDimension = initialParameters.getDimension();
1752 Point PDFGradient(parametersDimension);
1753 // Clone the distribution
1754 Implementation cloneDistribution(clone());
1755 // Increment for centered differences
1756 const Scalar eps = std::pow(ResourceMap::GetAsScalar("DistFunc-Precision"), 1.0 / 3.0);
1757 // Increment for noncentered differences
1758 const Scalar eps2 = std::pow(ResourceMap::GetAsScalar("DistFunc-Precision"), 1.0 / 2.0);
1759 Point newParameters(initialParameters);
1760 for (UnsignedInteger i = 0; i < parametersDimension; ++i)
1761 {
1762 Scalar delta = 0.0;
1763 Scalar rightPDF = 0.0;
1764 // We will try a centered finite difference approximation
1765 try
1766 {
1767 newParameters[i] = initialParameters[i] + eps;
1768 cloneDistribution->setParameter(newParameters);
1769 rightPDF = cloneDistribution->computePDF(point);
1770 delta += eps;
1771 }
1772 catch (...)
1773 {
1774 // If something went wrong with the right point, stay at the center point
1775 newParameters[i] = initialParameters[i];
1776 cloneDistribution->setParameter(newParameters);
1777 rightPDF = cloneDistribution->computePDF(point);
1778 }
1779 Scalar leftPDF = 0.0;
1780 try
1781 {
1782 // If something is wrong with the right point, use non-centered finite differences
1783 const Scalar leftEpsilon = delta == 0.0 ? eps2 : eps;
1784 newParameters[i] = initialParameters[i] - leftEpsilon;
1785 cloneDistribution->setParameter(newParameters);
1786 leftPDF = cloneDistribution->computePDF(point);
1787 delta += leftEpsilon;
1788 }
1789 catch (...)
1790 {
1791 // If something is wrong with the left point, it is either because the gradient is not computable or because we must use non-centered finite differences, in which case the right point has to be recomputed
1792 if (delta == 0.0) throw InvalidArgumentException(HERE) << "Error: cannot compute the PDF gradient at x=" << point << " for the current values of the parameters=" << initialParameters;
1793 newParameters[i] = initialParameters[i] + eps2;
1794 cloneDistribution->setParameter(newParameters);
1795 rightPDF = cloneDistribution->computePDF(point);
1796 delta += eps2;
1797 // And the left point will be the center point
1798 newParameters[i] = initialParameters[i];
1799 cloneDistribution->setParameter(newParameters);
1800 leftPDF = cloneDistribution->computePDF(point);
1801 }
1802 PDFGradient[i] = (rightPDF - leftPDF) / delta;
1803 newParameters[i] = initialParameters[i];
1804 }
1805 return PDFGradient;
1806 }
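/* Usage sketch (illustrative only): the gradient with respect to the parameters theta is
   estimated by finite differences, centered when both shifted parameter values are admissible,
     dPDF/dtheta_i ~ (PDF(x; theta_i + eps) - PDF(x; theta_i - eps)) / (2 * eps),
   and one-sided with the larger step eps2 when one side fails. Many distributions override this
   generic implementation with closed-form gradients. Assuming the facade forwards the method:

     #include "openturns/Normal.hxx"
     using namespace OT;
     const Normal n(0.0, 1.0);
     const Point grad(n.computePDFGradient(Point(1, 0.5))); // (d/dmu, d/dsigma) at x = 0.5
*/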
1807
1808 /* ComputePDFGradient On a Sample */
1809 Sample DistributionImplementation::computePDFGradient(const Sample & inSample) const
1810 {
1811 const UnsignedInteger size = inSample.getSize();
1812 Sample outSample(size, 0);
1813 for (UnsignedInteger i = 0; i < size; ++i)
1814 {
1815 Point grad(computePDFGradient(inSample[i]));
1816 if (i == 0)
1817 outSample = Sample(size, grad.getDimension());
1818 outSample[i] = grad;
1819 }
1820 return outSample;
1821 }
1822
1823 /* Get the logPDF gradient of the distribution */
1824 Point DistributionImplementation::computeLogPDFGradient(const Point & point) const
1825 {
1826 // (log u)' = u' / u for any positive function u
1827 // With u(theta|point) = PDF(theta|point), du(theta|point)/dtheta = PDFGradient(theta|point)
1828 const Scalar pdf = computePDF(point);
1829 if (pdf > 0)
1830 {
1831 const Point logPDFGradient(computePDFGradient(point) / pdf);
1832 return logPDFGradient;
1833 }
1834 else
1835 // LogPDFGradient is used to maximize the log-likelihood for exponential models
1836 // if the PDF is zero then u'/u takes an indeterminate form (for exponential models we could extract
1837 // the correct limit)
1838 return Point(getParameterDimension(), 0.0);
1839 }
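/* Usage sketch (illustrative only): by the chain rule, grad_theta log p(x; theta) =
   grad_theta p(x; theta) / p(x; theta), which is what is returned whenever the PDF is positive.
   Assuming the facade forwards both methods, the identity can be checked directly:

     #include "openturns/Normal.hxx"
     using namespace OT;
     const Normal n(1.0, 2.0);
     const Point x(1, 0.3);
     const Point g1(n.computeLogPDFGradient(x));
     const Point g2(n.computePDFGradient(x) / n.computePDF(x)); // same vector up to rounding
*/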
1840
1841 /* Get the LogPDFGradient of the distributionImplementation */
1842 struct ComputeLogPDFGradientPolicy
1843 {
1844 const Sample & input_;
1845 Sample & output_;
1846 const DistributionImplementation & distribution_;
1847
1848 ComputeLogPDFGradientPolicy( const Sample & input,
1849 Sample & output,
1850 const DistributionImplementation & distribution)
1851 : input_(input)
1852 , output_(output)
1853 , distribution_(distribution)
1854 {}
1855
1856 inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
1857 {
1858 for (UnsignedInteger i = r.begin(); i != r.end(); ++i)
1859 {
1860 const Point out = distribution_.computeLogPDFGradient(input_[i]);
1861 for (UnsignedInteger j = 0; j < output_.getDimension(); ++j)
1862 output_(i, j) = out[j];
1863 }
1864 }
1865 }; /* end struct ComputeLogPDFGradientPolicy */
1866
1867 /* Get the logPDF gradient of the distribution (sequential implementation) */
1868 Sample DistributionImplementation::computeLogPDFGradientSequential(const Sample & sample) const
1869 {
1870 const UnsignedInteger size = sample.getSize();
1871 Sample outSample(size, getParameterDimension());
1872 // Sequential implementation
1873 for (UnsignedInteger i = 0; i < size; ++i)
1874 {
1875 const Point result = computeLogPDFGradient(sample[i]);
1876 for (UnsignedInteger j = 0; j < getParameterDimension(); ++j)
1877 outSample(i, j) = result[j];
1878 }
1879 return outSample;
1880 }
1881
1882 /* Get the logPDF gradient of the distribution (parallel implementation) */
1883 Sample DistributionImplementation::computeLogPDFGradientParallel(const Sample & sample) const
1884 {
1885 const UnsignedInteger size = sample.getSize();
1886 Sample outSample(size, getParameterDimension());
1887 const ComputeLogPDFGradientPolicy policy( sample, outSample, *this );
1888 TBB::ParallelFor( 0, size, policy );
1889 return outSample;
1890 }
1891
1892 /* Get the logPDF gradient of the distribution */
1893 Sample DistributionImplementation::computeLogPDFGradient(const Sample & inSample) const
1894 {
1895 if (isParallel_) return computeLogPDFGradientParallel(inSample);
1896 else return computeLogPDFGradientSequential(inSample);
1897 }
1898
1899 /* ComputeCDFGradient On a Sample */
1900 Sample DistributionImplementation::computeCDFGradient(const Sample & inSample) const
1901 {
1902 const UnsignedInteger size = inSample.getSize();
1903 SampleImplementation outSample(size, getParameterDimension());
1904 UnsignedInteger shift = 0;
1905 for (UnsignedInteger i = 0; i < size; ++i)
1906 {
1907 const Point point(computeCDFGradient(inSample[i]));
1908 std::copy(point.begin(), point.end(), outSample.data_begin() + shift);
1909 shift += getParameterDimension();
1910 }
1911 return outSample;
1912 }
1913
1914 /* Get the CDF gradient of the distribution */
1915 Point DistributionImplementation::computeCDFGradient(const Point & point) const
1916 {
1917 const Point initialParameters(getParameter());
1918 const UnsignedInteger parametersDimension = initialParameters.getDimension();
1919 Point CDFGradient(parametersDimension);
1920 // Clone the distribution
1921 Implementation cloneDistribution(clone());
1922 // We will use centered differences
1923 const Scalar eps = std::pow(ResourceMap::GetAsScalar("DistFunc-Precision"), 1.0 / 3.0);
1924 // Increment for noncentered differences
1925 const Scalar eps2 = std::pow(ResourceMap::GetAsScalar("DistFunc-Precision"), 1.0 / 2.0);
1926 Point newParameters(initialParameters);
1927 for (UnsignedInteger i = 0; i < parametersDimension; ++i)
1928 {
1929 Scalar delta = 0.0;
1930 Scalar rightCDF = 0.0;
1931 // We will try a centered finite difference approximation
1932 try
1933 {
1934 newParameters[i] = initialParameters[i] + eps;
1935 cloneDistribution->setParameter(newParameters);
1936 rightCDF = cloneDistribution->computeCDF(point);
1937 delta += eps;
1938 }
1939 catch (...)
1940 {
1941 // If something went wrong with the right point, stay at the center point
1942 newParameters[i] = initialParameters[i];
1943 cloneDistribution->setParameter(newParameters);
1944 rightCDF = cloneDistribution->computeCDF(point);
1945 }
1946 Scalar leftCDF = 0.0;
1947 try
1948 {
1949 // If something is wrong with the right point, use non-centered finite differences
1950 const Scalar leftEpsilon = delta == 0.0 ? eps2 : eps;
1951 newParameters[i] = initialParameters[i] - leftEpsilon;
1952 cloneDistribution->setParameter(newParameters);
1953 leftCDF = cloneDistribution->computeCDF(point);
1954 delta += leftEpsilon;
1955 }
1956 catch (...)
1957 {
1958 // If something is wrong with the left point, it is either because the gradient is not computable or because we must use non-centered finite differences, in which case the right point has to be recomputed
1959 if (delta == 0.0) throw InvalidArgumentException(HERE) << "Error: cannot compute the CDF gradient at x=" << point << " for the current values of the parameters=" << initialParameters;
1960 newParameters[i] = initialParameters[i] + eps2;
1961 cloneDistribution->setParameter(newParameters);
1962 rightCDF = cloneDistribution->computeCDF(point);
1963 delta += eps2;
1964 // And the left point will be the center point
1965 newParameters[i] = initialParameters[i];
1966 cloneDistribution->setParameter(newParameters);
1967 leftCDF = cloneDistribution->computeCDF(point);
1968 }
1969 CDFGradient[i] = (rightCDF - leftCDF) / delta;
1970 newParameters[i] = initialParameters[i];
1971 }
1972 return CDFGradient;
1973 }
1974
1975 /* Build a C1 interpolation of the CDF function for 1D continuous distributions */
1976 Collection<PiecewiseHermiteEvaluation> DistributionImplementation::interpolatePDFCDF(const UnsignedInteger n)
1977 {
1978 if (!isContinuous()) throw InternalException(HERE) << "Error: cannot interpolate the PDF and CDF of noncontinuous distributions.";
1979 if (dimension_ != 1) throw NotYetImplementedException(HERE) << "In DistributionImplementation::interpolatePDFCDF(const UnsignedInteger n): cannot interpolate CDF for multidimensional distributions.";
1980 const Scalar xMin = getRange().getLowerBound()[0];
1981 const Scalar xMax = getRange().getUpperBound()[0];
1982 const Scalar mu = getMean()[0];
1983 // Here we use a very tight absolute precision (cdfEpsilon_ * cdfEpsilon_) in order to force the algorithm to use all the available discretization points
1984 GaussKronrod algorithm(n - 1, cdfEpsilon_ * cdfEpsilon_, GaussKronrodRule::G3K7);
1985 const PDFWrapper pdfWrapper(this);
1986 Scalar error = -1.0;
1987 Point ai;
1988 Point bi;
1989 Sample fi;
1990 Point ei;
1991 algorithm.integrate(pdfWrapper, xMin, mu, error, ai, bi, fi, ei);
1992 ai.add(mu);
1993 Sample locationsCDF(ai.getSize(), 1);
1994 locationsCDF.getImplementation()->setData(ai);
1995 locationsCDF = locationsCDF.sort(0);
1996 algorithm.integrate(pdfWrapper, mu, xMax, error, ai, bi, fi, ei);
1997 ai.add(xMax);
1998 Sample locationsCCDF(ai.getSize(), 1);
1999 locationsCCDF.getImplementation()->setData(ai);
2000 locationsCCDF = locationsCCDF.sort(0);
2001 Collection<PiecewiseHermiteEvaluation> coll(4);
2002 const Sample valuesCDF(computeCDF(locationsCDF));
2003 const Sample valuesPDF(computePDF(locationsCDF));
2004 const Sample valuesDDF(computeDDF(locationsCDF));
2005 coll[0] = PiecewiseHermiteEvaluation(locationsCDF.getImplementation()->getData(), valuesPDF, valuesDDF);
2006 coll[1] = PiecewiseHermiteEvaluation(locationsCDF.getImplementation()->getData(), valuesCDF, valuesPDF);
2007 const Sample valuesCCDF(computeComplementaryCDF(locationsCCDF));
2008 const Sample valuesCPDF(computePDF(locationsCCDF));
2009 Sample derivativesCCDF(valuesCPDF);
2010 derivativesCCDF *= Point(1, -1.0);
2011 const Sample valuesCDDF(computeDDF(locationsCCDF));
2012 coll[2] = PiecewiseHermiteEvaluation(locationsCCDF.getImplementation()->getData(), valuesCPDF, valuesCDDF);
2013 coll[3] = PiecewiseHermiteEvaluation(locationsCCDF.getImplementation()->getData(), valuesCCDF, derivativesCCDF);
2014 return coll;
2015 }
2016
2017 /* Compute the DDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
2018 Scalar DistributionImplementation::computeConditionalDDF(const Scalar,
2019 const Point & ) const
2020 {
2021 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeConditionalDDF(const Scalar x, const Point & y) const";
2022 }
2023
2024 /* Compute the DDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
2025 Point DistributionImplementation::computeSequentialConditionalDDF(const Point & ) const
2026 {
2027 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeConditionalDDF(const Point & x) const";
2028 }
2029
2030 /* Compute the PDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
2031 Scalar DistributionImplementation::computeConditionalPDF(const Scalar x,
2032 const Point & y) const
2033 {
2034 const UnsignedInteger conditioningDimension = y.getDimension();
2035 if (conditioningDimension >= dimension_) throw InvalidArgumentException(HERE) << "Error: cannot compute a conditional PDF with a conditioning point of dimension greater or equal to the distribution dimension.";
2036 // Special case for no conditioning or independent copula
2037 if ((conditioningDimension == 0) || (hasIndependentCopula())) return getMarginal(conditioningDimension).computePDF(x);
2038 // General case
2039 Indices conditioning(conditioningDimension);
2040 conditioning.fill();
2041 Indices conditioned(conditioning);
2042 conditioned.add(conditioningDimension);
2043 const Implementation conditioningDistribution(getMarginal(conditioning).getImplementation());
2044 const Scalar pdfConditioning = conditioningDistribution->computePDF(y);
2045 if (pdfConditioning <= 0.0) return 0.0;
2046 Point z(y);
2047 z.add(x);
2048 const Implementation conditionedDistribution(getMarginal(conditioned).getImplementation());
2049 const Scalar pdfConditioned = conditionedDistribution->computePDF(z);
2050 return pdfConditioned / pdfConditioning;
2051 }
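/* Usage sketch (illustrative only): the conditional PDF is the ratio of joint marginals,
   p(x_i | x_1, ..., x_{i-1}) = p(x_1, ..., x_i) / p(x_1, ..., x_{i-1}). For a bivariate normal
   with correlation rho = 0.5, X2 | X1 = 1 is N(0.5, sqrt(0.75)), so, assuming the facade
   forwards computeConditionalPDF:

     #include "openturns/CorrelationMatrix.hxx"
     #include "openturns/Normal.hxx"
     using namespace OT;
     CorrelationMatrix R(2);
     R(0, 1) = 0.5;
     const Normal biv(Point(2, 0.0), Point(2, 1.0), R);
     const Scalar p = biv.computeConditionalPDF(0.3, Point(1, 1.0));
     // p equals Normal(0.5, std::sqrt(0.75)).computePDF(0.3) up to rounding
*/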
2052
2053 /* Compute the PDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
2054 Point DistributionImplementation::computeSequentialConditionalPDF(const Point & x) const
2055 {
2056 if (x.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: expected a point of dimension=" << dimension_ << ", got dimension=" << x.getDimension();
2057 Point result(dimension_);
2058 Indices conditioning(1, 0);
2059 Implementation conditioningDistribution(getMarginal(conditioning).getImplementation());
2060 Point currentX(1, x[0]);
2061 Scalar pdfConditioning = conditioningDistribution->computePDF(currentX);
2062 result[0] = pdfConditioning;
2063 for (UnsignedInteger conditioningDimension = 1; conditioningDimension < dimension_; ++conditioningDimension)
2064 {
2065 // Return the result as soon as a conditional pdf is zero
2066 if (pdfConditioning == 0) return result;
2067 conditioning.add(conditioningDimension);
2068 conditioningDistribution = getMarginal(conditioning).getImplementation();
2069 currentX.add(x[conditioningDimension]);
2070 const Scalar pdfConditioned = conditioningDistribution->computePDF(currentX);
2071 result[conditioningDimension] = pdfConditioned / pdfConditioning;
2072 pdfConditioning = pdfConditioned;
2073 } // conditioningDimension
2074 return result;
2075 }
2076
2077 /* Compute the PDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
2078 Point DistributionImplementation::computeConditionalPDF(const Point & x,
2079 const Sample & y) const
2080 {
2081 const UnsignedInteger conditioningDimension = y.getDimension();
2082 if (conditioningDimension >= dimension_) throw InvalidArgumentException(HERE) << "Error: cannot compute a conditional PDF with a conditioning point of dimension greater or equal to the distribution dimension.";
2083 // Convert the values in x into a Sample
2084 const UnsignedInteger size = x.getDimension();
2085 SampleImplementation xAsSample(size, 1);
2086 xAsSample.setData(x);
2087 // Special case for no conditioning or independent copula
2088 if ((conditioningDimension == 0) || (hasIndependentCopula()))
2089 return getMarginal(conditioningDimension).computePDF(xAsSample).getImplementation()->getData();
2090 // General case
2091 Indices conditioning(conditioningDimension);
2092 conditioning.fill();
2093 Indices conditioned(conditioning);
2094 conditioned.add(conditioningDimension);
2095 const Implementation conditioningDistribution(getMarginal(conditioning).getImplementation());
2096 const Sample pdfConditioning(conditioningDistribution->computePDF(y));
2097 Sample z(y);
2098 z.stack(xAsSample);
2099 const Implementation conditionedDistribution(getMarginal(conditioned).getImplementation());
2100 const Sample pdfConditioned(conditionedDistribution->computePDF(z));
2101 Point result(size);
2102 for (UnsignedInteger i = 0; i < size; ++i)
2103 if (pdfConditioning(i, 0) > 0.0) result[i] = pdfConditioned(i, 0) / pdfConditioning(i, 0);
2104 return result;
2105 }
2106
2107 /* Compute the CDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
2108 Scalar DistributionImplementation::computeConditionalCDF(const Scalar x,
2109 const Point & y) const
2110 {
2111 const UnsignedInteger conditioningDimension = y.getDimension();
2112 if (conditioningDimension >= dimension_) throw InvalidArgumentException(HERE) << "Error: cannot compute a conditional CDF with a conditioning point of dimension greater or equal to the distribution dimension.";
2113 // Special case for no conditioning or independent copula
2114 if ((conditioningDimension == 0) || (hasIndependentCopula())) return getMarginal(conditioningDimension).computeCDF(x);
2115 // General case
2116 Indices conditioning(conditioningDimension);
2117 conditioning.fill();
2118 Indices conditioned(conditioning);
2119 conditioned.add(conditioningDimension);
2120 const Implementation conditioningDistribution(getMarginal(conditioning).getImplementation());
2121 const Scalar pdfConditioning = conditioningDistribution->computePDF(y);
2122 if (pdfConditioning <= 0.0) return 0.0;
2123 const Implementation conditionedDistribution(getMarginal(conditioned).getImplementation());
2124 const Scalar xMin = conditionedDistribution->getRange().getLowerBound()[conditioningDimension];
2125 if (x <= xMin) return 0.0;
2126 const Scalar xMax = conditionedDistribution->getRange().getUpperBound()[conditioningDimension];
2127 if (x >= xMax) return 1.0;
2128 // Numerical integration with respect to x
2129 Pointer<ConditionalPDFWrapper> p_conditionalPDFWrapper = new ConditionalPDFWrapper(conditionedDistribution);
2130 p_conditionalPDFWrapper->setParameter(y);
2131 GaussKronrod algo;
2132 const Scalar value = algo.integrate(UniVariateFunction(p_conditionalPDFWrapper), xMin, x);
2133 return std::min(1.0, std::max(0.0, value / pdfConditioning));
2134 }
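/* Usage sketch (illustrative only): for continuous distributions the conditional CDF integrates
   the conditional PDF with GaussKronrod from the lower bound of the conditioned component up to
   x, then normalizes by the conditioning PDF. Reusing the bivariate normal `biv` sketched after
   computeConditionalPDF above, F(x | X1 = 1) = Phi((x - 0.5) / sqrt(0.75)):

     const Scalar c = biv.computeConditionalCDF(0.3, Point(1, 1.0));
     // c ~ DistFunc::pNormal((0.3 - 0.5) / std::sqrt(0.75))
*/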
2135
2136 /* Compute the CDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
2137 Point DistributionImplementation::computeSequentialConditionalCDF(const Point & x) const
2138 {
2139 if (x.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: expected a point of dimension=" << dimension_ << ", got dimension=" << x.getDimension();
2140 Point result(dimension_);
2141 Indices conditioning(1, 0);
2142 Implementation conditioningDistribution(getMarginal(conditioning).getImplementation());
2143 Point currentX(1, x[0]);
2144 result[0] = conditioningDistribution->computeCDF(currentX);
2145 Scalar pdfConditioning = conditioningDistribution->computePDF(currentX);
2146 GaussKronrod algo;
2147 for (UnsignedInteger conditioningDimension = 1; conditioningDimension < dimension_; ++conditioningDimension)
2148 {
2149 // Return the result as soon as a conditional pdf is zero
2150 if (pdfConditioning == 0) return result;
2151 // If the current component is at the left of the marginal range, the conditional CDF is zero as well as the PDF
2152 const Scalar xMin = range_.getLowerBound()[conditioningDimension];
2153 if (x[conditioningDimension] <= xMin) return result;
2154 conditioning.add(conditioningDimension);
2155 conditioningDistribution = getMarginal(conditioning).getImplementation();
2156 // If the current component is at the right of the marginal range, the conditional CDF is one and the conditional PDF is zero
2157 const Scalar xMax = range_.getUpperBound()[conditioningDimension];
2158 if (x[conditioningDimension] >= xMax)
2159 {
2160 result[conditioningDimension] = 1.0;
2161 }
2162 else
2163 {
2164 // Here we have to integrate something...
2165 Pointer<ConditionalPDFWrapper> p_conditionalPDFWrapper = new ConditionalPDFWrapper(conditioningDistribution);
2166 p_conditionalPDFWrapper->setParameter(currentX);
2167 const Scalar cdfConditioned = algo.integrate(UniVariateFunction(p_conditionalPDFWrapper), xMin, std::min(x[conditioningDimension], xMax));
2168 result[conditioningDimension] = cdfConditioned / pdfConditioning;
2169 }
2170 currentX.add(x[conditioningDimension]);
2171 pdfConditioning = conditioningDistribution->computePDF(currentX);
2172 } // conditioningDimension
2173 return result;
2174 }
2175
2176 /* Compute the CDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
2177 Point DistributionImplementation::computeConditionalCDF(const Point & x,
2178 const Sample & y) const
2179 {
2180 const UnsignedInteger conditioningDimension = y.getDimension();
2181 if (conditioningDimension >= dimension_) throw InvalidArgumentException(HERE) << "Error: cannot compute a conditional CDF with a conditioning point of dimension greater or equal to the distribution dimension.";
2182 // Convert the values in x into a Sample
2183 const UnsignedInteger size = x.getDimension();
2184 // Special case for no conditioning or independent copula
2185 if ((conditioningDimension == 0) || (hasIndependentCopula()))
2186 {
2187 SampleImplementation xAsSample(size, 1);
2188 xAsSample.setData(x);
2189 return getMarginal(conditioningDimension).computeCDF(xAsSample).getImplementation()->getData();
2190 }
2191 // General case
2192 Indices conditioning(conditioningDimension);
2193 conditioning.fill();
2194 Indices conditioned(conditioning);
2195 conditioned.add(conditioningDimension);
2196 const Implementation conditioningDistribution(getMarginal(conditioning).getImplementation());
2197 const Sample pdfConditioning(conditioningDistribution->computePDF(y));
2198 const Implementation conditionedDistribution(getMarginal(conditioned).getImplementation());
2199 const Scalar xMin = conditionedDistribution->getRange().getLowerBound()[conditioningDimension];
2200 const Scalar xMax = conditionedDistribution->getRange().getUpperBound()[conditioningDimension];
2201 Point result(size);
2202 Pointer<ConditionalPDFWrapper> p_conditionalPDFWrapper = new ConditionalPDFWrapper(conditionedDistribution);
2203 GaussKronrod algo;
2204 for (UnsignedInteger i = 0; i < size; ++i)
2205 if (pdfConditioning(i, 0) > 0.0)
2206 {
2207 if (x[i] >= xMax) result[i] = 1.0;
2208 else if (x[i] > xMin)
2209 {
2210 // Numerical integration with respect to x
2211 p_conditionalPDFWrapper->setParameter(y[i]);
2212 const Scalar value(algo.integrate(UniVariateFunction(p_conditionalPDFWrapper), xMin, x[i]));
2213 result[i] = std::min(1.0, std::max(0.0, value / pdfConditioning(i, 0)));
2214 } // xMin < x < xMax
2215 } // pdfConditioning(i, 0) > 0
2216 return result;
2217 }
2218
2219 /* Compute the quantile of Xi | X1, ..., Xi-1, i.e. x such that CDF(x|y) = q with x = Xi, y = (X1,...,Xi-1) */
2220 Scalar DistributionImplementation::computeConditionalQuantile(const Scalar q,
2221 const Point & y) const
2222 {
2223 if (y.getDimension() == 0 || hasIndependentCopula()) return getMarginal(y.getDimension()).computeScalarQuantile(q);
2224 return computeConditionalQuantile(Point(1, q), Sample(1, y))[0];
2225 }
2226
2227 /* Compute the quantile of Xi | X1, ..., Xi-1, i.e. x such that CDF(x|y) = q with x = Xi, y = (X1,...,Xi-1) */
2228 Point DistributionImplementation::computeSequentialConditionalQuantile(const Point & q) const
2229 {
2230 Point result(0);
2231 Point y(0);
2232 for (UnsignedInteger i = 0; i < dimension_; ++i)
2233 result.add(computeConditionalQuantile(q[i], result));
2234 return result;
2235 }
2236
2237 /* Compute the quantile of Xi | X1, ..., Xi-1, i.e. x such that CDF(x|y) = q with x = Xi, y = (X1,...,Xi-1) */
2238 Point DistributionImplementation::computeConditionalQuantile(const Point & q,
2239 const Sample & y) const
2240 {
2241 const UnsignedInteger conditioningDimension = y.getDimension();
2242 if (conditioningDimension >= dimension_) throw InvalidArgumentException(HERE) << "Error: cannot compute a conditional quantile with a conditioning point of dimension greater or equal to the distribution dimension.";
2243 const UnsignedInteger size = q.getDimension();
2244 for (UnsignedInteger i = 0; i < size; ++i)
2245 {
2246 if ((q[i] < 0.0) || (q[i] > 1.0)) throw InvalidArgumentException(HERE) << "Error: point=" << i << ", cannot compute a conditional quantile for a probability level q[" << i << "]=" << q[i] << " outside of [0, 1]";
2247 }
2248 // Special case for no conditioning or independent copula
2249 if ((conditioningDimension == 0) || (hasIndependentCopula()))
2250 return getMarginal(conditioningDimension).computeQuantile(q).getImplementation()->getData();
2251 // General case
2252 const Scalar xMin = getRange().getLowerBound()[conditioningDimension];
2253 const Scalar xMax = getRange().getUpperBound()[conditioningDimension];
2254 Point result(size);
2255 // Create a single ConditionalCDFWrapper and update its conditioning parameter for each point
2256 Pointer<ConditionalCDFWrapper> p_conditionalCDFWrapper = new ConditionalCDFWrapper(this);
2257 for (UnsignedInteger i = 0; i < size; ++i)
2258 {
2259 p_conditionalCDFWrapper->setParameter(y[i]);
2260 Brent solver(quantileEpsilon_, cdfEpsilon_, cdfEpsilon_, quantileIterations_);
2261 result[i] = solver.solve(UniVariateFunction(p_conditionalCDFWrapper), q[i], xMin, xMax, 0.0, 1.0);
2262 }
2263 return result;
2264 }
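/* Usage sketch (illustrative only): the conditional quantile inverts the conditional CDF with
   Brent's method over the marginal range, so computeConditionalCDF(qx, y) ~ q where
   qx = computeConditionalQuantile(q, y). A round trip on the bivariate normal `biv` above:

     const Scalar q = 0.9;
     const Point y(1, 1.0);
     const Scalar qx = biv.computeConditionalQuantile(q, y);
     const Scalar back = biv.computeConditionalCDF(qx, y); // back ~ 0.9
*/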
2265
2266 /* Quantile computation for dimension=1 */
2267 Scalar DistributionImplementation::computeScalarQuantile(const Scalar prob,
2268 const Bool tail) const
2269 {
2270 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "Error: the method computeScalarQuantile is only defined for 1D distributions";
2271 // One cannot always trust the current range: if it has not been computed yet, we may be here precisely to compute it!
2272 Scalar lower = getRange().getLowerBound()[0];
2273 Scalar upper = getRange().getUpperBound()[0];
2274 // This test tells whether the range has already been computed. If not, it is the role of computeScalarQuantile() to do it.
2275 if (lower > upper)
2276 {
2277 LOGDEBUG("DistributionImplementation::computeScalarQuantile: look for a bracketing of the bounds of the range");
2278 // Find a rough estimate of the lower bound and the upper bound
2279 Scalar step = 1.0;
2280 Scalar cdf = computeCDF(lower);
2281 if (cdf >= cdfEpsilon_)
2282 {
2283 // negative lower bound
2284 lower -= step;
2285 cdf = computeCDF(lower);
2286 while (cdf >= cdfEpsilon_)
2287 {
2288 step *= 2.0;
2289 lower -= step;
2290 cdf = computeCDF(lower);
2291 }
2292 }
2293 else
2294 {
2295 // positive lower bound
2296 lower += step;
2297 cdf = computeCDF(lower);
2298 while (computeCDF(lower) <= cdfEpsilon_)
2299 {
2300 step *= 2.0;
2301 lower += step;
2302 cdf = computeCDF(lower);
2303 }
2304 }
2305 // Here, lower is a rough estimate of the lower bound
2306 // Go to the upper bound
2307 upper = lower;
2308 step = 1.0;
2309 Scalar ccdf = computeComplementaryCDF(upper);
2310 while (ccdf >= cdfEpsilon_)
2311 {
2312 upper += step;
2313 step *= 2.0;
2314 ccdf = computeComplementaryCDF(upper);
2315 }
2316 }
2317 LOGDEBUG(OSS() << "DistributionImplementation::computeScalarQuantile: lower=" << lower << ", upper=" << upper);
2318 if (prob < 0.0) return (tail ? upper : lower);
2319 if (prob >= 1.0) return (tail ? lower : upper);
2320 const Scalar q = tail ? 1.0 - prob : prob;
2321 const CDFWrapper wrapper(this);
2322 const Function f(bindMethod<CDFWrapper, Point, Point>(wrapper, &CDFWrapper::computeCDF, 1, 1));
2323 const Scalar leftTau = lower;
2324 const Scalar leftCDF = 0.0;
2325 const Scalar rightTau = upper;
2326 const Scalar rightCDF = 1.0;
2327 Brent solver(quantileEpsilon_, cdfEpsilon_, cdfEpsilon_, quantileIterations_);
2328 const Scalar root = solver.solve(f, q, leftTau, rightTau, leftCDF, rightCDF);
2329 LOGDEBUG(OSS() << "root=" << root);
2330 return root;
2331 } // computeScalarQuantile
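/* Usage sketch (illustrative only): the 1D quantile is the root of F(x) = q (or of F(x) = 1 - q
   for the tail), solved with Brent's method between the bracketed range bounds. Assuming the
   facade forwards computeQuantile:

     #include "openturns/Normal.hxx"
     using namespace OT;
     const Normal n;
     const Scalar x975 = n.computeQuantile(0.975)[0];       // ~ 1.95996
     const Scalar same = n.computeQuantile(0.025, true)[0]; // tail quantile, same value
*/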
2332
2333
2334 // Structure used to implement the computeQuantile() method efficiently
2335 struct CopulaQuantileWrapper
2336 {
2337 CopulaQuantileWrapper(const DistributionImplementation * p_distribution)
2338 : p_distribution_(p_distribution)
2339 , dimension_(p_distribution->getDimension())
2340 {
2341 // Nothing to do
2342 }
2343
2344 Point computeDiagonal(const Point & u) const
2345 {
2346 const Point point(dimension_, u[0]);
2347 const Scalar cdf = p_distribution_->computeCDF(point);
2348 const Point value(1, cdf);
2349 return value;
2350 }
2351
2352 const DistributionImplementation * p_distribution_;
2353 const UnsignedInteger dimension_;
2354 }; // struct CopulaQuantileWrapper
2355
2356 /* Generic implementation of the quantile computation for copulas */
2357 Point DistributionImplementation::computeQuantileCopula(const Scalar prob,
2358 const Bool tail) const
2359 {
2360 const UnsignedInteger dimension = getDimension();
2361 // Special case for boundary values
2362 const Scalar q = tail ? 1.0 - prob : prob;
2363 if (q <= 0.0) return Point(dimension, 0.0);
2364 if (q >= 1.0) return Point(dimension, 1.0);
2365 // Special case for dimension 1
2366 if (dimension == 1) return Point(1, q);
2367 CopulaQuantileWrapper wrapper(this);
2368 const Function f(bindMethod<CopulaQuantileWrapper, Point, Point>(wrapper, &CopulaQuantileWrapper::computeDiagonal, 1, 1));
2369 Scalar leftTau = q;
2370 const Point leftPoint(1, leftTau);
2371 const Point leftValue(f(leftPoint));
2372 Scalar leftCDF = leftValue[0];
2373 // Upper bound of the bracketing interval
2374 Scalar rightTau = 1.0 - (1.0 - q) / dimension;
2375 Point rightPoint(1, rightTau);
2376 const Point rightValue(f(rightPoint));
2377 Scalar rightCDF = rightValue[0];
2378 // Use Brent's method to compute the quantile efficiently
2379 Brent solver(cdfEpsilon_, cdfEpsilon_, cdfEpsilon_, quantileIterations_);
2380 return Point(dimension, solver.solve(f, q, leftTau, rightTau, leftCDF, rightCDF));
2381 }
2382
2383 /* Generic implementation of the quantile computation */
2384 Point DistributionImplementation::computeQuantile(const Scalar prob,
2385 const Bool tail) const
2386 {
2387 if (isCopula())
2388 return computeQuantileCopula(prob, tail);
2389 Scalar marginalProb = 0.0;
2390 return computeQuantile(prob, tail, marginalProb);
2391 }
2392
2393 Point DistributionImplementation::computeQuantile(const Scalar prob,
2394 const Bool tail,
2395 Scalar & marginalProb) const
2396 {
2397 const Scalar q = tail ? 1.0 - prob : prob;
2398 marginalProb = q;
2399 // Special case for boundary values
2400 if (prob < quantileEpsilon_) return (tail ? getRange().getUpperBound() : getRange().getLowerBound());
2401 if (prob >= 1.0 - quantileEpsilon_) return (tail ? getRange().getLowerBound() : getRange().getUpperBound());
2402 // Special case for dimension 1
2403 if (dimension_ == 1) return Point(1, computeScalarQuantile(prob, tail));
2404 // Special case for independent copula
2405 if (hasIndependentCopula())
2406 {
2407 Point result(dimension_);
2408 marginalProb = std::pow(q, 1.0 / dimension_);
2409 for (UnsignedInteger i = 0; i < dimension_; ++i) result[i] = getMarginal(i).computeScalarQuantile(marginalProb);
2410 return result;
2411 }
2412 // Extract the marginal distributions
2413 Collection<Implementation> marginals(dimension_);
2414 for (UnsignedInteger i = 0; i < dimension_; i++) marginals[i] = getMarginal(i).getImplementation();
2415 // The n-D quantile is defined as X(\tau) = (F_1^{-1}(\tau), ..., F_n^{-1}(\tau)),
2416 // with tau such that F(X(\tau)) = q.
2417 // As F(x) = C(F_1(x_1),...,F_n(x_n)), the constraint F(X(\tau)) = q reads:
2418 // C(\tau,...,\tau) = q
2419 // Bracketing of \tau using the Frechet Hoeffding bounds:
2420 // max(n\tau - n + 1, 0) <= C(\tau,...,\tau) <= \tau
2421 // from which we deduce that q <= \tau and \tau <= 1 - (1 - q) / n
2422 // Lower bound of the bracketing interval
2423 const QuantileWrapper wrapper(marginals, this);
2424 const Function f(bindMethod<QuantileWrapper, Point, Point>(wrapper, &QuantileWrapper::computeDiagonal, 1, 1));
2425 Scalar leftTau = q;
2426 Scalar leftCDF = f(Point(1, leftTau))[0];
2427 // Due to numerical precision issues, the theoretical bound can be slightly violated
2428 if (leftCDF > prob)
2429 {
2430 leftTau = 0.0;
2431 leftCDF = 0.0;
2432 }
2433 // Upper bound of the bracketing interval
2434 Scalar rightTau = 1.0 - (1.0 - q) / dimension_;
2435 Scalar rightCDF = f(Point(1, rightTau))[0];
2436 // Due to numerical precision issues, the theoretical bound can be slightly violated
2437 if (rightCDF < prob)
2438 {
2439 rightTau = 1.0;
2440 rightCDF = 1.0;
2441 }
2442 LOGDEBUG(OSS() << "DistributionImplementation::computeQuantile: dimension=" << dimension_ << ", q=" << q << ", leftTau=" << leftTau << ", leftCDF=" << leftCDF << ", rightTau=" << rightTau << ", rightCDF=" << rightCDF);
2443 // Use Brent's method to compute the quantile efficiently for continuous distributions
2444 const Brent solver(quantileEpsilon_, cdfEpsilon_, cdfEpsilon_, quantileIterations_);
2445 marginalProb = solver.solve(f, q, leftTau, rightTau, leftCDF, rightCDF);
2446 LOGINFO(OSS(false) << "tau=" << marginalProb);
2447 return wrapper.diagonalToSpace(marginalProb);
2448 }
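/* A minimal usage sketch of the multivariate quantile defined above, assuming
   only the public Distribution interface (Normal, CorrelationMatrix and Point
   are the classes already used in this file; the 0.95 level is arbitrary):

     #include <cmath>
     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       // Bivariate normal with correlation 0.5
       CorrelationMatrix R(2);
       R(1, 0) = 0.5;
       const Normal dist(Point(2, 0.0), Point(2, 1.0), R);
       // Multivariate quantile at level 0.95: x_i = F_i^{-1}(tau) with C(tau, tau) = 0.95
       const Point x(dist.computeQuantile(0.95));
       // The defining property can be checked directly
       const Scalar cdf = dist.computeCDF(x);
       return (std::abs(cdf - 0.95) < 1.0e-6) ? 0 : 1;
     }
*/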
2449
2450 /* Get the minimum volume interval containing at least a given probability of the distribution.
2451 The minimum volume interval [a, b] is such that:
2452 a\in[lowerBound, F^{-1}(1-p)]
2453 b = F^{-1}(p+F(a))
2454 f(a) = f(b) = f(F^{-1}(p+F(a)))
2455 so we look for the root of f(F^{-1}(p+F(a))) - f(a)
2456 */
2457 struct MinimumVolumeIntervalWrapper
2458 {
2459 MinimumVolumeIntervalWrapper(const DistributionImplementation * p_distribution,
2460 const Collection<Distribution> & marginals,
2461 const Scalar prob)
2462 : p_distribution_(p_distribution)
2463 , marginals_(marginals)
2464 , lastB_(SpecFunc::LowestScalar)
2465 , prob_(prob)
2466 {
2467 // Nothing to do
2468 }
2469
2470 MinimumVolumeIntervalWrapper(const DistributionImplementation * p_distribution,
2471 const Scalar prob)
2472 : p_distribution_(p_distribution)
2473 , marginals_(0)
2474 , lastB_(SpecFunc::LowestScalar)
2475 , prob_(prob)
2476 {
2477 // Nothing to do
2478 }
2479
2480 // The minimum volume interval [a, b] is such that:
2481 // a\in[lowerBound, F^{-1}(1-p)]
2482 // b = F^{-1}(p+F(a))
2483 // f(a) = f(b) = f(F^{-1}(p+F(a)))
2484 // Here we compute f(F^{-1}(p+F(a))) - f(a)
2485 Point operator() (const Point & point) const
2486 {
2487 lastB_ = p_distribution_->computeQuantile(prob_ + p_distribution_->computeCDF(point))[0];
2488 const Scalar pdfB = p_distribution_->computePDF(lastB_);
2489 const Scalar pdfPoint = p_distribution_->computePDF(point);
2490 return Point(1, pdfB - pdfPoint);
2491 }
2492
2493 Point objective(const Point & point) const
2494 {
2495 lastB_ = p_distribution_->computeQuantile(prob_ + p_distribution_->computeCDF(point))[0];
2496 return Point(1, lastB_ - point[0]);
2497 }
2498
2499 Scalar getLastB() const
2500 {
2501 return lastB_;
2502 }
2503
2504 Interval buildBilateralInterval(const Scalar beta) const
2505 {
2506 const UnsignedInteger size(marginals_.getSize());
2507 Point lower(size);
2508 Point upper(size);
2509 const Scalar alpha(0.5 * (1.0 - beta));
2510 for (UnsignedInteger i = 0; i < size; ++i)
2511 {
2512 lower[i] = marginals_[i].computeQuantile(alpha, false)[0];
2513 upper[i] = marginals_[i].computeQuantile(alpha, true)[0];
2514 }
2515 return Interval(lower, upper);
2516 }
2517
2518 Interval buildMinimumVolumeInterval(const Scalar beta) const
2519 {
2520 const UnsignedInteger size(marginals_.getSize());
2521 Point lower(size);
2522 Point upper(size);
2523 for (UnsignedInteger i = 0; i < size; ++i)
2524 {
2525 const Interval marginalIC(marginals_[i].computeMinimumVolumeInterval(beta));
2526 lower[i] = marginalIC.getLowerBound()[0];
2527 upper[i] = marginalIC.getUpperBound()[0];
2528 }
2529 return Interval(lower, upper);
2530 }
2531
2532 Point computeBilateralProbability(const Point & beta) const
2533 {
2534 const Interval IC(buildBilateralInterval(beta[0]));
2535 const Scalar probability = p_distribution_->computeProbability(IC);
2536 return Point(1, probability);
2537 }
2538
2539 Point computeMinimumVolumeProbability(const Point & beta) const
2540 {
2541 const Interval IC(buildMinimumVolumeInterval(beta[0]));
2542 const Scalar probability = p_distribution_->computeProbability(IC);
2543 return Point(1, probability);
2544 }
2545
2546 const DistributionImplementation * p_distribution_;
2547 Collection<Distribution> marginals_;
2548 mutable Scalar lastB_;
2549 const Scalar prob_;
2550 }; // struct MinimumVolumeIntervalWrapper
2551
2552 Interval DistributionImplementation::computeMinimumVolumeInterval(const Scalar prob) const
2553 {
2554 Scalar marginalProb = -1.0;
2555 return computeMinimumVolumeIntervalWithMarginalProbability(prob, marginalProb);
2556 }
2557
2558 Interval DistributionImplementation::computeMinimumVolumeIntervalWithMarginalProbability(const Scalar prob,
2559 Scalar & marginalProb) const
2560 {
2561 if (!isContinuous()) throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeMinimumVolumeInterval()";
2562 // If the distribution is elliptical, the minimum volume interval is equal to the bilateral confidence interval which is much cheaper to compute
2563 if (isElliptical())
2564 {
2565 LOGINFO("Compute the minimum volume interval using the bilateral confidence interval (elliptical case)");
2566 const Interval result(computeBilateralConfidenceIntervalWithMarginalProbability(prob, marginalProb));
2567 return result;
2568 }
2569 if (prob <= 0.0)
2570 {
2571 const Point median(computeQuantile(0.5));
2572 marginalProb = 0.0;
2573 return Interval(median, median);
2574 }
2575 if (prob >= 1.0)
2576 {
2577 marginalProb = 1.0;
2578 return getRange();
2579 }
2580 if (dimension_ == 1)
2581 {
2582 // First, the most accurate method, which assumes a continuous PDF
2583 try
2584 {
2585 const Interval result(computeUnivariateMinimumVolumeIntervalByRootFinding(prob, marginalProb));
2586 LOGINFO("Compute the minimum volume interval by root finding (continuous case)");
2587 return result;
2588 }
2589 // Second, the general purpose method
2590 catch(...)
2591 {
2592 const Interval result(computeUnivariateMinimumVolumeIntervalByOptimization(prob, marginalProb));
2593 LOGINFO("Compute the minimum volume interval by optimization (general case)");
2594 return result;
2595 }
2596 }
2597 Collection<Distribution> marginals(dimension_);
2598 for (UnsignedInteger i = 0; i < dimension_; ++i) marginals[i] = getMarginal(i);
2599 const MinimumVolumeIntervalWrapper minimumVolumeIntervalWrapper(this, marginals, prob);
2600 const Function function(bindMethod<MinimumVolumeIntervalWrapper, Point, Point>(minimumVolumeIntervalWrapper, &MinimumVolumeIntervalWrapper::computeMinimumVolumeProbability, 1, 1));
2601 Brent solver(quantileEpsilon_, pdfEpsilon_, pdfEpsilon_, quantileIterations_);
2602 // Here the equation we have to solve is P(X\in IC(\beta))=prob
2603 marginalProb = solver.solve(function, prob, 0.0, 1.0, 0.0, 1.0);
2604 const Interval IC(minimumVolumeIntervalWrapper.buildMinimumVolumeInterval(marginalProb));
2605 return IC;
2606 }
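/* A short sketch of the multivariate branch above, assuming the public
   Distribution interface; ClaytonCopula is assumed to be available in the
   library's copula set and the 0.9 level is an arbitrary illustration value:

     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       Collection<Distribution> marginals(2);
       marginals[0] = Normal(0.0, 1.0);
       marginals[1] = LogNormal(0.0, 1.0);
       const ComposedDistribution dist(marginals, ClaytonCopula(2.0));
       // Product box I such that P(X in I) = 0.9, each marginal interval being
       // a minimum volume interval at the common marginal probability found by Brent
       const Interval box(dist.computeMinimumVolumeInterval(0.9));
       return box.getDimension() == 2 ? 0 : 1;
     }
*/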
2607
2608 /* If the density is continuous, we have to solve PDF(b) - PDF(a) == 0 with F(b)-F(a)=prob, b>=a
2609 i.e. b=F^{-1}(prob+F(a))
2610 */
2611 Interval DistributionImplementation::computeUnivariateMinimumVolumeIntervalByRootFinding(const Scalar prob,
2612 Scalar & marginalProb) const
2613 {
2614 const MinimumVolumeIntervalWrapper minimumVolumeIntervalWrapper(this, prob);
2615 const Function function(bindMethod<MinimumVolumeIntervalWrapper, Point, Point>(minimumVolumeIntervalWrapper, &MinimumVolumeIntervalWrapper::operator(), 1, 1));
2616 Brent solver(quantileEpsilon_, pdfEpsilon_, pdfEpsilon_, quantileIterations_);
2617 const Scalar xMin = getRange().getLowerBound()[0];
2618 const Scalar xMax = computeScalarQuantile(prob, true);
2619 const Scalar a = solver.solve(function, 0.0, xMin, xMax);
2620 const Scalar b = minimumVolumeIntervalWrapper.getLastB();
2621 marginalProb = prob;
2622 return Interval(a, b);
2623 }
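/* For a symmetric unimodal density the root finding above reproduces the
   central interval: for the standard normal at prob = 0.95 one expects roughly
   [-1.96, 1.96]. A quick check, assuming the public API (values indicative):

     #include <iostream>
     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       const Normal dist(0.0, 1.0);
       const Interval ic(dist.computeMinimumVolumeInterval(0.95));
       // expected close to -1.96 and +1.96
       std::cout << ic.getLowerBound() << " " << ic.getUpperBound() << std::endl;
       return 0;
     }
*/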
2624
2625 /* We minimize b-a with the constraint F(b)-F(a)=prob, b>=a
2626 i.e. b=F^{-1}(prob+F(a))
2627 */
2628 Interval DistributionImplementation::computeUnivariateMinimumVolumeIntervalByOptimization(const Scalar prob,
2629 Scalar & marginalProb) const
2630 {
2631 const MinimumVolumeIntervalWrapper minimumVolumeIntervalWrapper(this, prob);
2632 const Function objective(bindMethod<MinimumVolumeIntervalWrapper, Point, Point>(minimumVolumeIntervalWrapper, &MinimumVolumeIntervalWrapper::objective, 1, 1));
2633 OptimizationProblem problem(objective);
2634 problem.setBounds(getRange());
2635 TNC solver(problem);
2636 solver.setIgnoreFailure(true);
2637 solver.setStartingPoint(computeQuantile(prob, true));
2638 solver.run();
2639 const Scalar a = solver.getResult().getOptimalPoint()[0];
2640 const Scalar b = minimumVolumeIntervalWrapper.getLastB();
2641 marginalProb = prob;
2642 return Interval(a, b);
2643 }
2644
2645 /* Get the product bilateral confidence interval containing a given probability of the distribution */
2646 Interval DistributionImplementation::computeBilateralConfidenceInterval(const Scalar prob) const
2647 {
2648 Scalar marginalProb = -1.0;
2649 return computeBilateralConfidenceIntervalWithMarginalProbability(prob, marginalProb);
2650 }
2651
2652 Interval DistributionImplementation::computeBilateralConfidenceIntervalWithMarginalProbability(const Scalar prob,
2653 Scalar & marginalProb) const
2654 {
2655 if (prob <= 0.0)
2656 {
2657 const Point median(computeQuantile(0.5));
2658 marginalProb = 0.0;
2659 return Interval(median, median);
2660 }
2661 if (prob >= 1.0)
2662 {
2663 marginalProb = 1.0;
2664 return getRange();
2665 }
2666 if (dimension_ == 1)
2667 {
2668 marginalProb = prob;
2669 const Interval IC(computeQuantile(0.5 * (1.0 - prob), false), computeQuantile(0.5 * (1.0 - prob), true));
2670 return IC;
2671 }
2672 if (!isContinuous()) throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeBilateralConfidenceIntervalWithMarginalProbability() for non continuous multivariate distributions";
2673 Collection<Distribution> marginals(dimension_);
2674 for (UnsignedInteger i = 0; i < dimension_; ++i) marginals[i] = getMarginal(i);
2675 const MinimumVolumeIntervalWrapper minimumVolumeIntervalWrapper(this, marginals, prob);
2676 const Function function(bindMethod<MinimumVolumeIntervalWrapper, Point, Point>(minimumVolumeIntervalWrapper, &MinimumVolumeIntervalWrapper::computeBilateralProbability, 1, 1));
2677 Brent solver(quantileEpsilon_, pdfEpsilon_, pdfEpsilon_, quantileIterations_);
2678 marginalProb = solver.solve(function, prob, 0.0, 1.0, 0.0, 1.0);
2679 const Interval IC(minimumVolumeIntervalWrapper.buildBilateralInterval(marginalProb));
2680 return IC;
2681 }
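/* Sketch of the bilateral interval: each marginal contributes the central
   interval [F_i^{-1}((1-beta)/2), F_i^{-1}((1+beta)/2)] and beta is adjusted by
   Brent so that the joint probability of the product box equals prob. Assuming
   the public API:

     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       const Normal dist(3);  // independent standard normal marginals
       const Interval ic(dist.computeBilateralConfidenceInterval(0.95));
       // With independent marginals beta solves beta^3 = 0.95, i.e. beta ~ 0.983
       return ic.getDimension() == 3 ? 0 : 1;
     }
*/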
2682
2683 /* Get the product unilateral confidence interval containing a given probability of the distribution */
2684 Interval DistributionImplementation::computeUnilateralConfidenceInterval(const Scalar prob,
2685 const Bool tail) const
2686 {
2687 Scalar marginalProb = -1.0;
2688 return computeUnilateralConfidenceIntervalWithMarginalProbability(prob, tail, marginalProb);
2689 }
2690
2691 Interval DistributionImplementation::computeUnilateralConfidenceIntervalWithMarginalProbability(const Scalar prob,
2692 const Bool tail,
2693 Scalar & marginalProb) const
2694 {
2695 marginalProb = -1.0;
2696 if (tail)
2697 {
2698 const Point lowerBound(computeInverseSurvivalFunction(prob, marginalProb));
2699 return Interval(lowerBound, getRange().getUpperBound());
2700 }
2701 const Point upperBound(computeQuantile(prob, false, marginalProb));
2702 return Interval(getRange().getLowerBound(), upperBound);
2703 }
2704
2705 /* Get the minimum volume level set containing at least a given probability of the distribution.
2706 The minimum volume level set A(p) is such that A(p)={x\in R^n | y(x) <= y_p}
2707 where y(x)=-\log p(x) and y_p is the p-quantile of Y=y(X)=-\log p(X)
2708 */
2709 LevelSet DistributionImplementation::computeMinimumVolumeLevelSet(const Scalar prob) const
2710 {
2711 Scalar threshold = -1.0;
2712 return computeMinimumVolumeLevelSetWithThreshold(prob, threshold);
2713 }
2714
2715 LevelSet DistributionImplementation::computeMinimumVolumeLevelSetWithThreshold(const Scalar prob,
2716 Scalar & threshold) const
2717 {
2718 if (!isContinuous()) throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeMinimumVolumeLevelSet()";
2719 // 1D special case here to avoid a double construction of minimumVolumeLevelSetFunction
2720 if ((dimension_ == 1) && (ResourceMap::GetAsBool("Distribution-MinimumVolumeLevelSetBySampling")))
2721 {
2722 LOGINFO("Compute the minimum volume level set by sampling (QMC)");
2723 const LevelSet result(computeUnivariateMinimumVolumeLevelSetByQMC(prob, threshold));
2724 return result;
2725 }
2726 Function minimumVolumeLevelSetFunction(MinimumVolumeLevelSetEvaluation(clone()).clone());
2727 minimumVolumeLevelSetFunction.setGradient(MinimumVolumeLevelSetGradient(clone()).clone());
2728 // If dimension_ == 1 the threshold can be computed analytically
2729 Scalar minusLogPDFThreshold;
2730 if (dimension_ == 1)
2731 {
2732 const CompositeDistribution composite(minimumVolumeLevelSetFunction, *this);
2733 minusLogPDFThreshold = composite.computeQuantile(prob)[0];
2734 LOGINFO("Compute the minimum volume level set by using a composite distribution quantile (univariate general case)");
2735 } // dimension == 1
2736 else
2737 {
2738 LOGINFO("Compute the minimum volume level set by sampling (Monte Carlo)");
2739 const UnsignedInteger size = ResourceMap::GetAsUnsignedInteger("Distribution-MinimumVolumeLevelSetSamplingSize");
2740 const Sample xSample(getSample(size));
2741 const Sample logPDFSample(computeLogPDF(xSample));
2742 minusLogPDFThreshold = -logPDFSample.computeQuantile(1.0 - prob)[0];
2743 } // dimension > 1
2744 threshold = std::exp(-minusLogPDFThreshold);
2745
2746 return LevelSet(minimumVolumeLevelSetFunction, LessOrEqual(), minusLogPDFThreshold);
2747 }
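/* Usage sketch for the level set, assuming the Distribution facade exposes the
   same signature and that LevelSet provides a contains() test (both assumed
   here, not checked against the headers):

     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       const Normal dist(2);
       Scalar threshold = -1.0;
       const LevelSet levelSet(dist.computeMinimumVolumeLevelSetWithThreshold(0.95, threshold));
       // threshold now holds the PDF value on the boundary of the level set
       const Bool inside = levelSet.contains(Point(2, 0.0));
       return inside ? 0 : 1;
     }
*/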
2748
2749 LevelSet DistributionImplementation::computeUnivariateMinimumVolumeLevelSetByQMC(const Scalar prob,
2750 Scalar & threshold) const
2751 {
2752 Function minimumVolumeLevelSetFunction(MinimumVolumeLevelSetEvaluation(clone()).clone());
2753 minimumVolumeLevelSetFunction.setGradient(MinimumVolumeLevelSetGradient(clone()).clone());
2754 // As we are in 1D and as the function defining the composite distribution can have complex variations,
2755 // we use an improved sampling method to compute the quantile of the -logPDF(X) distribution
2756 const UnsignedInteger size = SpecFunc::NextPowerOfTwo(ResourceMap::GetAsUnsignedInteger("Distribution-MinimumVolumeLevelSetSamplingSize"));
2757 const Sample xQMC(getSampleByQMC(size));
2758 const Sample logPDFSample(computeLogPDF(xQMC));
2759 const Scalar minusLogPDFThreshold = -logPDFSample.computeQuantile(1.0 - prob)[0];
2760 threshold = std::exp(-minusLogPDFThreshold);
2761
2762 return LevelSet(minimumVolumeLevelSetFunction, LessOrEqual(), minusLogPDFThreshold);
2763 }
2764
2765 /* Get the mathematical and numerical range of the distribution.
2766 Its mathematical range is the smallest closed interval outside
2767 of which the PDF is zero, and the numerical range is the interval
2768 outside of which the PDF is rounded to zero in double precision */
2769 Interval DistributionImplementation::getRange() const
2770 {
2771 return range_;
2772 }
2773
2774 void DistributionImplementation::setRange(const Interval & range)
2775 {
2776 if (range.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given range has a dimension incompatible with the dimension of the distribution.";
2777 range_ = range;
2778 }
2779
2780 /* Compute the numerical range of the distribution given the parameters values */
2781 void DistributionImplementation::computeRange()
2782 {
2783 // Quick return for copulas
2784 if (isCopula())
2785 {
2786 range_ = Interval(dimension_);
2787 return;
2788 }
2789 const Interval::BoolCollection finiteLowerBound(dimension_, false);
2790 const Interval::BoolCollection finiteUpperBound(dimension_, false);
2791 // Initialize the range with inverted bounds in order to inform the generic implementation of the
2792 // computeScalarQuantile() method that it cannot trust the current range
2793 range_ = Interval(Point(dimension_, 1.0), Point(dimension_, -1.0), finiteLowerBound, finiteUpperBound);
2794 setRange(Interval(computeLowerBound(), computeUpperBound(), finiteLowerBound, finiteUpperBound));
2795 }
2796
2797 /* Compute the lower bound of the range */
2798 Point DistributionImplementation::computeLowerBound() const
2799 {
2800 // For a multivariate distribution, the range is the axis-aligned box that fits the marginal ranges
2801 Point lowerBound(dimension_);
2802 // Here, we must separate the 1D case from the nD case, as the getMarginal() method is generic for the 1D case and
2803 // would involve a circular call to computeRange()
2804 if (dimension_ == 1) lowerBound[0] = computeScalarQuantile(cdfEpsilon_);
2805 else for (UnsignedInteger i = 0; i < dimension_; ++i) lowerBound[i] = getMarginal(i).computeScalarQuantile(cdfEpsilon_);
2806 return lowerBound;
2807 }
2808
2809 /* Compute the upper bound of the range */
2810 Point DistributionImplementation::computeUpperBound() const
2811 {
2812 // For a multivariate distribution, the range is the axis-aligned box that fits the marginal ranges
2813 Point upperBound(dimension_);
2814 if (dimension_ == 1) upperBound[0] = computeScalarQuantile(cdfEpsilon_, true);
2815 else for (UnsignedInteger i = 0; i < dimension_; ++i) upperBound[i] = getMarginal(i).computeScalarQuantile(cdfEpsilon_, true);
2816 return upperBound;
2817 }
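/* The numerical range is built from extreme marginal quantiles at level
   cdfEpsilon_, so even unbounded distributions get a finite box. A small sketch
   assuming the public API (the printed bounds are indicative, roughly +/- 7.65
   for a standard normal):

     #include <iostream>
     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       const Normal dist(0.0, 1.0);
       const Interval range(dist.getRange());
       std::cout << range.getLowerBound() << " " << range.getUpperBound() << std::endl;
       return 0;
     }
*/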
2818
2819 /* Compute the mean of the distribution */
2820 void DistributionImplementation::computeMean() const
2821 {
2822 mean_ = getShiftedMoment(1, Point(getDimension(), 0.0));
2823 isAlreadyComputedMean_ = true;
2824 }
2825
2826 /* Get the mean of the distribution */
2827 Point DistributionImplementation::getMean() const
2828 {
2829 if (isCopula())
2830 return Point(getDimension(), 0.5);
2831 if (!isAlreadyComputedMean_) computeMean();
2832 return mean_;
2833 }
2834
2835 /* Get the standard deviation of the distribution */
2836 Point DistributionImplementation::getStandardDeviation() const
2837 {
2838 // In the case of dimension==1, use the covariance matrix. The call
2839 // to getCovariance() reuses the covariance if it has already been computed.
2840 if (dimension_ == 1) return Point(1, std::sqrt(getCovariance()(0, 0)));
2841 // In higher dimension, either use the covariance if it has already been
2842 // computed...
2843
2844 if (isCopula())
2845 // 0.2886751345948128822545744 = 1 / sqrt(12)
2846 return Point(getDimension(), 0.2886751345948128822545744);
2847
2848 if (isAlreadyComputedCovariance_)
2849 {
2850 Point result(dimension_);
2851 for (UnsignedInteger i = 0; i < dimension_; ++i) result[i] = std::sqrt(covariance_(i, i));
2852 return result;
2853 }
2854 // ... or compute only the marginal variances.
2855 const Point variance(getCenteredMoment(2));
2856 Point result(dimension_);
2857 for (UnsignedInteger i = 0; i < dimension_; ++i) result[i] = std::sqrt(variance[i]);
2858 return result;
2859 }
2860
2861 /* Get the skewness of the distribution */
2862 Point DistributionImplementation::getSkewness() const
2863 {
2864 if (isCopula())
2865 return Point(getDimension(), 0.0);
2866 const Point variance(getCenteredMoment(2));
2867 const Point thirdMoment(getCenteredMoment(3));
2868 Point result(dimension_);
2869 for (UnsignedInteger i = 0; i < dimension_; ++i) result[i] = thirdMoment[i] / std::pow(variance[i], 1.5);
2870 return result;
2871 }
2872
2873 /* Get the kurtosis of the distribution */
2874 Point DistributionImplementation::getKurtosis() const
2875 {
2876 if (isCopula())
2877 // 1.8 = 9/5
2878 return Point(getDimension(), 1.8);
2879
2880 const Point variance(getCenteredMoment(2));
2881 const Point fourthMoment(getCenteredMoment(4));
2882 Point result(dimension_);
2883 for (UnsignedInteger i = 0; i < dimension_; ++i) result[i] = fourthMoment[i] / std::pow(variance[i], 2.0);
2884 return result;
2885 }
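/* The four summaries above follow the usual definitions: skewness is the third
   centered moment divided by sigma^3 and kurtosis the fourth divided by sigma^4
   (raw kurtosis, not excess, so a normal marginal gives 3). A quick sketch
   assuming the public API:

     #include <iostream>
     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       const Normal dist(0.0, 2.0);
       std::cout << dist.getMean() << std::endl;               // 0
       std::cout << dist.getStandardDeviation() << std::endl;  // 2
       std::cout << dist.getSkewness() << std::endl;           // 0
       std::cout << dist.getKurtosis() << std::endl;           // 3 (not excess)
       return 0;
     }
*/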
2886
2887 /* Get the moments of the distribution */
2888 Point DistributionImplementation::getMoment(const UnsignedInteger n) const
2889 {
2890 if (n == 0) return Point(dimension_, 1.0);
2891 return getShiftedMoment(n, Point(dimension_, 0.0));
2892 }
2893
2894 /* Get the centered moments of the distribution */
2895 Point DistributionImplementation::getCenteredMoment(const UnsignedInteger n) const
2896 {
2897 if (n == 0) throw InvalidArgumentException(HERE) << "Error: the centered moments of order 0 are undefined.";
2898 if (n == 1) return Point(dimension_, 0.0);
2899 return getShiftedMoment(n, getMean());
2900 }
2901
2902 /* Compute the covariance of the distribution */
2903 void DistributionImplementation::computeCovariance() const
2904 {
2905 if (isCopula()) computeCovarianceCopula();
2906 else if (isContinuous()) computeCovarianceContinuous();
2907 else if (isDiscrete()) computeCovarianceDiscrete();
2908 else computeCovarianceGeneral();
2909 }
2910
2911
2912
2913 struct CopulaCovarianceWrapper
2914 {
2915 CopulaCovarianceWrapper(const Distribution & distribution)
2916 : distribution_(distribution)
2917 {
2918 // Nothing to do
2919 }
2920
2921 Point kernel(const Point & point) const
2922 {
2923 return Point(1, distribution_.computeCDF(point) - point[0] * point[1]);
2924 }
2925
2926 const Distribution & distribution_;
2927 };
2928
2929 /* Compute the covariance of the copula */
2930 void DistributionImplementation::computeCovarianceCopula() const
2931 {
2932 const UnsignedInteger dimension = getDimension();
2933 // We need this to initialize the covariance matrix in two cases:
2934 // + this is the first call to this routine (which could be checked by testing the dimension of the copula and the dimension of the matrix)
2935 // + the copula has changed from a non-independent one to the independent copula
2936 covariance_ = CovarianceMatrix(dimension);
2937 // First the diagonal terms, which are the marginal covariances
2938 // Uniform marginals, the diagonal is 1/12
2939 for (UnsignedInteger i = 0; i < dimension; ++i)
2940 {
2941 // 0.08333333333333333333333333 = 1 / 12
2942 covariance_(i, i) = 0.08333333333333333333333333;
2943 }
2944 // Off-diagonal terms if the copula is not the independent copula
2945 if (!hasIndependentCopula())
2946 {
2947 const IteratedQuadrature integrator;
2948 const Interval unitSquare(Point(2, 0.0), Point(2, 1.0));
2949 // Performs the integration for each covariance in the strictly lower triangle of the covariance matrix
2950 // We start with the loop over the coefficients because the most expensive task is to get the 2D marginal copulas
2951 Indices indices(2);
2952 for(UnsignedInteger rowIndex = 0; rowIndex < dimension; ++rowIndex)
2953 {
2954 indices[0] = rowIndex;
2955 for(UnsignedInteger columnIndex = rowIndex + 1; columnIndex < dimension; ++columnIndex)
2956 {
2957 indices[1] = columnIndex;
2958 // For the usual case of a bidimensional copula, no need to extract marginal distributions
2959 Distribution marginalDistribution(*this);
2960 if (dimension > 2) marginalDistribution = getMarginal(indices);
2961 if (!marginalDistribution.getImplementation()->hasIndependentCopula())
2962 {
2963 // Build the integrand
2964 CopulaCovarianceWrapper functionWrapper(marginalDistribution);
2965 Function function(bindMethod<CopulaCovarianceWrapper, Point, Point>(functionWrapper, &CopulaCovarianceWrapper::kernel, 2, 1));
2966 // Compute the covariance element
2967 covariance_(rowIndex, columnIndex) = integrator.integrate(function, unitSquare)[0];
2968 }
2969 } // loop over column indices
2970 } // loop over row indices
2971 } // if !hasIndependentCopula
2972 isAlreadyComputedCovariance_ = true;
2973 } // computeCovariance
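/* Note on the integrand above: the marginals of a copula are uniform on [0, 1]
   with variance 1/12, and the off-diagonal term is
     Cov(U_i, U_j) = \int_{[0,1]^2} (C(u, v) - u v) du dv
   which is exactly the Spearman correlation of the pair divided by 12, so the
   covariance matrix of a copula is its Spearman correlation matrix scaled by 1/12. */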
2974
2975
2976 void DistributionImplementation::computeCovarianceContinuous() const
2977 {
2978 // We need this to initialize the covariance matrix in two cases:
2979 // + this is the first call to this routine (which could be checked by testing the dimension of the distribution and the dimension of the matrix)
2980 // + the copula has changed from a non-independent one to the independent copula
2981 mean_ = getMean();
2982 covariance_ = CovarianceMatrix(dimension_);
2983 // First the diagonal terms, which are the marginal covariances
2984 // Marginal covariances
2985 const Point variance(getCenteredMoment(2));
2986 for (UnsignedInteger component = 0; component < dimension_; ++component) covariance_(component, component) = variance[component];
2987 // Off-diagonal terms if the copula is not the independent copula
2988 if (!hasIndependentCopula())
2989 {
2990 // Here we use the following expression of the covariance \Sigma_{i,j}:
2991 // \Sigma_{i,j}=\int_{\R^2}(x_i-\mu_i)(x_j-\mu_j)p_{i,j}(x_i,x_j)dx_idx_j
2992 // Do we use the adaptive quadrature algorithm?
2993 const Bool useAdaptiveAlgorithm = ResourceMap::GetAsBool("Distribution-UseCovarianceAdaptiveAlgorithm");
2994 IntegrationAlgorithm integrator;
2995 if (useAdaptiveAlgorithm) integrator = IteratedQuadrature(GaussKronrod());
2996 else integrator = GaussLegendre(Indices(2, static_cast<UnsignedInteger>(std::ceil(std::sqrt(1.0 * integrationNodesNumber_)))));
2997 // Performs the integration for each covariance in the strictly lower triangle of the covariance matrix
2998 // We loop over the coefficients in the outer loop because the most expensive task is to get the 2D marginal distributions
2999 Indices indices(2);
3000 for(UnsignedInteger rowIndex = 0; rowIndex < dimension_; ++rowIndex)
3001 {
3002 indices[0] = rowIndex;
3003 const Scalar muI = mean_[rowIndex];
3004 for (UnsignedInteger columnIndex = rowIndex + 1; columnIndex < dimension_; ++columnIndex)
3005 {
3006 indices[1] = columnIndex;
3007 const Scalar muJ = mean_[columnIndex];
3008 const Implementation marginalDistribution(getMarginal(indices).getImplementation());
3009 if (!marginalDistribution->hasIndependentCopula())
3010 {
3011 // Compute the covariance element
3012 const CovarianceWrapper kernel(marginalDistribution, muI, muJ);
3013 const Interval interval(marginalDistribution->getRange());
3014 LOGINFO(OSS() << "Compute covariance(" << rowIndex << ", " << columnIndex << ")");
3015 const Point value(integrator.integrate(kernel, interval));
3016 LOGINFO(OSS() << "covariance(" << rowIndex << ", " << columnIndex << ")=" << value[0]);
3017 covariance_(rowIndex, columnIndex) = value[0];
3018 }
3019 } // loop over column indices
3020 } // loop over row indices
3021 } // if !hasIndependentCopula
3022 isAlreadyComputedCovariance_ = true;
3023 } // computeCovarianceContinuous
3024
3025 void DistributionImplementation::computeCovarianceDiscrete() const
3026 {
3027 // We need this to initialize the covariance matrix in two cases:
3028 // + this is the first call to this routine (which could be checked by testing the dimension of the distribution and the dimension of the matrix)
3029 // + the copula has changed from a non-independent one to the independent copula
3030 covariance_ = CovarianceMatrix(dimension_);
3031 // First the diagonal terms, which are the marginal covariances
3032 // Marginal covariances
3033 const Point variance(getCenteredMoment(2));
3034 for(UnsignedInteger component = 0; component < dimension_; ++component) covariance_(component, component) = variance[component];
3035 // Off-diagonal terms if the copula is not the independent copula
3036 if (!hasIndependentCopula())
3037 {
3038 // To ensure that the mean is up to date
3039 mean_ = getMean();
3040 // Performs the integration for each covariance in the strictly lower triangle of the covariance matrix
3041 // We first loop over the coefficients because the most expensive task is to get the 2D marginal distributions
3042 Indices indices(2);
3043 for(UnsignedInteger rowIndex = 0; rowIndex < dimension_; ++rowIndex)
3044 {
3045 indices[0] = rowIndex;
3046 const Scalar muI = mean_[rowIndex];
3047 for(UnsignedInteger columnIndex = rowIndex + 1; columnIndex < dimension_; ++columnIndex)
3048 {
3049 indices[1] = columnIndex;
3050 const Scalar muJ = mean_[columnIndex];
3051 const Implementation marginalDistribution(getMarginal(indices).getImplementation());
3052 if (!marginalDistribution->hasIndependentCopula())
3053 {
3054 const Sample support(marginalDistribution->getSupport());
3055 const Point probabilities(marginalDistribution->getProbabilities());
3056 Scalar value = 0.0;
3057 const UnsignedInteger size = support.getSize();
3058 for (UnsignedInteger i = 0; i < size; ++i) value += (support(i, 0) - muI) * (support(i, 1) - muJ) * probabilities[i];
3059 covariance_(rowIndex, columnIndex) = value;
3060 }
3061 } // loop over column indices
3062 } // loop over row indices
3063 } // if !hasIndependentCopula
3064 isAlreadyComputedCovariance_ = true;
3065 }
3066
3067
3068
3069 void DistributionImplementation::computeCovarianceGeneral() const
3070 {
3071 // We need this to initialize the covariance matrix in two cases:
3072 // + this is the first call to this routine (which could be checked by testing the dimension of the distribution and the dimension of the matrix)
3073 // + the copula has changed from a non-independent one to the independent copula
3074 covariance_ = CovarianceMatrix(dimension_);
3075 // First the diagonal terms, which are the marginal covariances
3076 // To ensure that the mean is up to date
3077 mean_ = getMean();
3078 // Get the standard deviation
3079 const Point standardDeviation(getStandardDeviation());
3080 for(UnsignedInteger component = 0; component < dimension_; ++component)
3081 covariance_(component, component) = standardDeviation[component] * standardDeviation[component];
3082 // Off-diagonal terms if the copula is not the independent copula
3083 if (!hasIndependentCopula())
3084 {
3085 Collection<Implementation> marginals(dimension_);
3086 for(UnsignedInteger component = 0; component < dimension_; ++component)
3087 marginals[component] = getMarginal(component).getImplementation();
3088 const Scalar delta = 2.0;
3089 Indices indices(2);
3090 const int N(8 * 2 * 2 * 2 * 2 * 2);
3091 const Scalar h = 0.5 / 2 / 2 / 2 / 2 / 2;
3092 for(UnsignedInteger rowIndex = 0; rowIndex < dimension_; ++rowIndex)
3093 {
3094 indices[0] = rowIndex;
3095 const Scalar mi = marginals[rowIndex]->computeQuantile(0.5)[0];
3096 const Scalar di = marginals[rowIndex]->computeQuantile(0.75)[0] - marginals[rowIndex]->computeQuantile(0.25)[0];
3097 // We compute the upper triangle in order to avoid indices swap in marginal
3098 // extraction
3099 for(UnsignedInteger columnIndex = rowIndex + 1; columnIndex < dimension_; ++columnIndex)
3100 {
3101 indices[1] = columnIndex;
3102 const Implementation marginalDistribution(getMarginal(indices).getImplementation());
3103 if (!marginalDistribution->hasIndependentCopula())
3104 {
3105 const Scalar mj = marginals[columnIndex]->computeQuantile(0.5)[0];
3106 const Scalar dj = marginals[columnIndex]->computeQuantile(0.75)[0] - marginals[columnIndex]->computeQuantile(0.25)[0];
3107 Point xij(2);
3108 xij[0] = mi;
3109 xij[1] = mj;
3110 Scalar covarianceIJ = 0.0;
3111 // Then we loop over the integration points
3112 for(int rowNodeIndex = -N; rowNodeIndex < N + 1; ++rowNodeIndex)
3113 {
3114 const Scalar hi = h * rowNodeIndex;
3115 const Scalar expHi = std::exp(hi);
3116 const Scalar iexpHi = 1.0 / expHi;
3117 const Scalar sinhHi = 0.5 * (expHi - iexpHi);
3118 const Scalar expSinhHi = std::exp(sinhHi);
3119 const Scalar iexpSinhHi = 1.0 / expSinhHi;
3120 const Scalar iTwoCoshSinhHi = 1.0 / (expSinhHi + iexpSinhHi);
3121 const Scalar xip = mi + expSinhHi * iTwoCoshSinhHi * di * delta;
3122 const Scalar wi = (expHi + iexpHi) * iTwoCoshSinhHi * iTwoCoshSinhHi;
3123 const Scalar cdfip = marginals[rowIndex]->computeCDF(xip);
3124 for(int columnNodeIndex = -N; columnNodeIndex < N + 1; ++columnNodeIndex)
3125 {
3126 const Scalar hj = h * columnNodeIndex;
3127 const Scalar expHj = std::exp(hj);
3128 const Scalar iexpHj = 1.0 / expHj;
3129 const Scalar sinhHj = 0.5 * (expHj - iexpHj);
3130 const Scalar expSinhHj = std::exp(sinhHj);
3131 const Scalar iexpSinhHj = 1.0 / expSinhHj;
3132 const Scalar iTwoCoshSinhHj = 1.0 / (expSinhHj + iexpSinhHj);
3133 const Scalar xjp = mj + expSinhHj * iTwoCoshSinhHj * dj * delta;
3134 const Scalar wj = (expHj + iexpHj) * iTwoCoshSinhHj * iTwoCoshSinhHj;
3135 const Scalar cdfjp = marginals[columnIndex]->computeCDF(xjp);
3136 Point inpp(2);
3137 inpp[0] = xip;
3138 inpp[1] = xjp;
3139 covarianceIJ += delta * delta * di * dj * h * h * wi * wj * (marginalDistribution->computeCDF(inpp) - cdfip * cdfjp);
3140 } // loop over J integration nodes
3141 } // loop over I integration nodes
3142 covariance_(rowIndex, columnIndex) = covarianceIJ;
3143 } // if !hasIndependentCopula
3144 } // loop over column indices
3145 } // loop over row indices
3146 }
3147 isAlreadyComputedCovariance_ = true;
3148 } // computeCovarianceGeneral
3149
3150 /* Get the covariance of the distribution */
3151 CovarianceMatrix DistributionImplementation::getCovariance() const
3152 {
3153 if (!isAlreadyComputedCovariance_) computeCovariance();
3154 return covariance_;
3155 }
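/* Usage sketch for the covariance accessor, assuming the public API; for a
   bivariate normal with unit variances and correlation 0.5 the covariance
   matrix equals the correlation matrix:

     #include <iostream>
     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       CorrelationMatrix R(2);
       R(1, 0) = 0.5;
       const Normal dist(Point(2, 0.0), Point(2, 1.0), R);
       std::cout << dist.getCovariance() << std::endl;   // [[1, 0.5], [0.5, 1]]
       std::cout << dist.getCorrelation() << std::endl;  // same matrix here
       return 0;
     }
*/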
3156
3157 /* Correlation matrix accessor */
3158 CorrelationMatrix DistributionImplementation::getCorrelation() const
3159 {
3160 // To make sure the covariance is up to date
3161 (void) getCovariance();
3162 CorrelationMatrix R(dimension_);
3163 Point sigma(dimension_);
3164 for (UnsignedInteger i = 0; i < dimension_; ++i)
3165 {
3166 const Scalar sigmaI = std::sqrt(covariance_(i, i));
3167 sigma[i] = sigmaI;
3168 if (sigmaI > 0.0)
3169 for (UnsignedInteger j = 0; j < i; ++j)
3170 if (sigma[j] > 0)
3171 R(i, j) = covariance_(i, j) / (sigmaI * sigma[j]);
3172 }
3173 return R;
3174 }
3175
3176 CorrelationMatrix DistributionImplementation::getPearsonCorrelation() const
3177 {
3178 return getCorrelation();
3179 }
3180
3181 /* Get the Spearman correlation of the distribution */
3182 CorrelationMatrix DistributionImplementation::getSpearmanCorrelation() const
3183 {
3184 if (isCopula()) return getCorrelation();
3185 return getCopula().getSpearmanCorrelation();
3186 }
3187
3188 struct DistributionImplementationKendallTauWrapper
3189 {
3190 DistributionImplementationKendallTauWrapper(const Distribution & distribution)
3191 : distribution_(distribution)
3192 {
3193 if (!distribution.isCopula())
3194 {
3195 const UnsignedInteger dimension = distribution.getDimension();
3196 marginalCollection_ = Collection<Distribution>(dimension);
3197 for (UnsignedInteger i = 0; i < dimension; ++i)
3198 marginalCollection_[i] = distribution.getMarginal(i);
3199 }
3200 }
3201
3202 Point kernelForCopula(const Point & point) const
3203 {
3204 return Point(1, distribution_.computeCDF(point) * distribution_.computePDF(point));
3205 }
3206
3207 Point kernelForDistribution(const Point & point) const
3208 {
3209 const UnsignedInteger dimension = distribution_.getDimension();
3210 Point x(dimension);
3211 Scalar factor = 1.0;
3212 for (UnsignedInteger i = 0; i < dimension; ++i)
3213 {
3214 const Point xi(marginalCollection_[i].computeQuantile(point[i]));
3215 x[i] = xi[0];
3216 factor *= marginalCollection_[i].computePDF(xi);
3217 if (std::abs(factor) < SpecFunc::Precision) return Point(1, 0.0);
3218 }
3219 return Point(1, distribution_.computeCDF(point) * distribution_.computePDF(x) / factor);
3220 }
3221
3222 const Distribution & distribution_;
3223 Collection<Distribution> marginalCollection_;
3224 }; // DistributionImplementationKendallTauWrapper
3225
3226 /* Get the Kendall concordance of the distribution */
3227 CorrelationMatrix DistributionImplementation::getKendallTau() const
3228 {
3229 CorrelationMatrix tau(dimension_);
3230 // First special case: independent marginals
3231 if (hasIndependentCopula()) return tau;
3232 // Second special case: elliptical distribution
3233 if (hasEllipticalCopula())
3234 {
3235 const CorrelationMatrix shape(getShapeMatrix());
3236 for (UnsignedInteger i = 0; i < dimension_; ++i)
3237 for(UnsignedInteger j = 0; j < i; ++j)
3238 tau(i, j) = std::asin(shape(i, j)) * (2.0 / M_PI);
3239 return tau;
3240 }
3241 // General case
3242 const IteratedQuadrature integrator;
3243 const Interval square(2);
3244 // Performs the integration in the strictly lower triangle of the tau matrix
3245 Indices indices(2);
3246 for(UnsignedInteger rowIndex = 0; rowIndex < dimension_; ++rowIndex)
3247 {
3248 indices[0] = rowIndex;
3249 for (UnsignedInteger columnIndex = rowIndex + 1; columnIndex < dimension_; ++columnIndex)
3250 {
3251 indices[1] = columnIndex;
3252 const Distribution marginalDistribution(getMarginal(indices).getImplementation());
3253 if (!marginalDistribution.hasIndependentCopula())
3254 {
3255 // Build the integrand
3256 const DistributionImplementationKendallTauWrapper functionWrapper(marginalDistribution);
3257 Function function;
3258 if (isCopula())
3259 function = (bindMethod<DistributionImplementationKendallTauWrapper, Point, Point>(functionWrapper, &DistributionImplementationKendallTauWrapper::kernelForCopula, 2, 1));
3260 else
3261 function = (bindMethod<DistributionImplementationKendallTauWrapper, Point, Point>(functionWrapper, &DistributionImplementationKendallTauWrapper::kernelForDistribution, 2, 1));
3262 tau(rowIndex, columnIndex) = integrator.integrate(function, square)[0];
3263 }
3264 } // loop over column indices
3265 } // loop over row indices
3266 return tau;
3267 }
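/* For an elliptical copula the branch above uses the closed form
   tau(i, j) = (2 / pi) * asin(shape(i, j)); a shape coefficient of 0.5 gives
   tau = (2 / pi) * (pi / 6) = 1 / 3. A quick sketch, assuming NormalCopula is
   available in the library's copula set:

     #include <iostream>
     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       CorrelationMatrix R(2);
       R(1, 0) = 0.5;
       const NormalCopula copula(R);
       std::cout << copula.getKendallTau() << std::endl;  // off-diagonal ~ 0.3333
       return 0;
     }
*/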
3268
3269 /* Get the shape matrix of the distribution, i.e. the correlation matrix
3270 of its copula if it is elliptical */
3271 CorrelationMatrix DistributionImplementation::getShapeMatrix() const
3272 {
3273 if (!hasEllipticalCopula()) throw NotDefinedException(HERE) << "Error: the shape matrix is defined only for distributions with elliptical copulas.";
3274 // Easy case: elliptical distribution
3275 if (isElliptical()) return getCorrelation();
3276 // Difficult case: elliptical copula with non-elliptical marginals
3277 const Collection<Distribution> ellipticalMarginals(dimension_, getStandardDistribution().getMarginal(0));
3278 return ComposedDistribution(ellipticalMarginals, getCopula()).getCorrelation();
3279 }
3280
3281 /* Cholesky factor of the correlation matrix accessor */
3282 TriangularMatrix DistributionImplementation::getCholesky() const
3283 {
3284 return getCovariance().computeCholesky();
3285 }
3286
3287 /* Inverse of the Cholesky factor of the correlation matrix accessor */
3288 TriangularMatrix DistributionImplementation::getInverseCholesky() const
3289 {
3290 // Compute its Cholesky factor
3291 TriangularMatrix cholesky(getCholesky());
3292
3293 const TriangularMatrix inverseCholesky(cholesky.solveLinearSystem(IdentityMatrix(dimension_), false).getImplementation());
3294
3295 return inverseCholesky;
3296 }
3297
3298 /* Compute the nodes and weights for a 1D gauss quadrature over [-1, 1] with respect to the Lebesgue measure */
3299 void DistributionImplementation::computeGaussNodesAndWeights() const
3300 {
3301 const GaussLegendre integrator(Indices(1, integrationNodesNumber_));
3302 // Nodes
3303 gaussNodes_ = integrator.getNodes().getImplementation()->getData() * 2.0 - Point(integrationNodesNumber_, 1.0);
3304 // Weights
3305 gaussWeights_ = integrator.getWeights() * 2.0;
3306 isAlreadyComputedGaussNodesAndWeights_ = true;
3307 }
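/* The GaussLegendre rule above is defined over [0, 1]; the affine map
   x -> 2x - 1 and the factor 2 on the weights move it to [-1, 1] with respect
   to the Lebesgue measure. For 2 nodes this recovers the classical rule: the
   [0, 1] nodes 0.5 -/+ 1/(2 sqrt(3)) with weights 0.5 become the [-1, 1] nodes
   -/+ 1/sqrt(3) (about +/- 0.5774) with unit weights. */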
3308
3309 /* integrationNodesNumber accessors */
3310 UnsignedInteger DistributionImplementation::getIntegrationNodesNumber() const
3311 {
3312 return integrationNodesNumber_;
3313 }
3314
3315 void DistributionImplementation::setIntegrationNodesNumber(const UnsignedInteger integrationNodesNumber) const
3316 {
3317 if (integrationNodesNumber != integrationNodesNumber_)
3318 {
3319 isAlreadyComputedMean_ = false;
3320 isAlreadyComputedCovariance_ = false;
3321 isAlreadyComputedGaussNodesAndWeights_ = false;
3322 integrationNodesNumber_ = integrationNodesNumber;
3323 }
3324 }
3325
3326
3327 /* Gauss nodes and weights accessor */
3328 Point DistributionImplementation::getGaussNodesAndWeights(Point & weights) const
3329 {
3330 if (!isAlreadyComputedGaussNodesAndWeights_) computeGaussNodesAndWeights();
3331 weights = gaussWeights_;
3332 return gaussNodes_;
3333 }
3334
3335
3336 /* Get the moments of the standardized distribution */
3337 Point DistributionImplementation::getStandardMoment(const UnsignedInteger n) const
3338 {
3339 return getStandardRepresentative().getMoment(n);
3340 }
3341
3342
3343 /* Get the shifted moments of the distribution */
3344 Point DistributionImplementation::getShiftedMoment(const UnsignedInteger n,
3345 const Point & shift) const
3346 {
3347 if (isContinuous()) return computeShiftedMomentContinuous(n, shift);
3348 if (isDiscrete()) return computeShiftedMomentDiscrete(n, shift);
3349 return computeShiftedMomentGeneral(n, shift);
3350 }
3351
3352 Point DistributionImplementation::computeShiftedMomentContinuous(const UnsignedInteger n,
3353 const Point & shift) const
3354 {
3355 if (shift.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the shift dimension must match the distribution dimension.";
3356 if (n == 0) return Point(dimension_, 1.0);
3357 Point moment(dimension_);
3358 // For each component
3359 GaussKronrod algo;
3360 for(UnsignedInteger component = 0; component < dimension_; ++component)
3361 {
3362 const Implementation marginalDistribution(getMarginal(component).getImplementation());
3363 const ShiftedMomentWrapper integrand(n, shift[component], marginalDistribution);
3364 const Scalar a = marginalDistribution->getRange().getLowerBound()[0];
3365 const Scalar b = marginalDistribution->getRange().getUpperBound()[0];
3366 moment[component] = algo.integrate(integrand, Interval(a, b))[0];
3367 } // End of each component
3368 return moment;
3369 }
3370
3371 Point DistributionImplementation::computeShiftedMomentDiscrete(const UnsignedInteger n,
3372 const Point & shift) const
3373 {
3374 if (n == 0) throw InvalidArgumentException(HERE) << "Error: the centered moments of order 0 are undefined.";
3375 if (shift.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the shift dimension must match the distribution dimension.";
3376 Point moment(dimension_);
3377 const Sample support(getSupport());
3378 const Point probabilities(getProbabilities());
3379 for (UnsignedInteger i = 0; i < support.getSize(); ++i)
3380 for (UnsignedInteger j = 0; j < dimension_; ++j)
3381 moment[j] += std::pow(support(i, j) - shift[j], static_cast<int>(n)) * probabilities[i];
3382 return moment;
3383 }
3384
3385 Point DistributionImplementation::computeShiftedMomentGeneral(const UnsignedInteger n,
3386 const Point & shift) const
3387 {
3388 if (n == 0) throw InvalidArgumentException(HERE) << "Error: the centered moments of order 0 are undefined.";
3389 if (shift.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the shift dimension must match the distribution dimension.";
3390 Point moment(dimension_);
3391 const Scalar epsilon = std::sqrt(quantileEpsilon_);
3392 const UnsignedInteger MaximumLevel = ResourceMap::GetAsUnsignedInteger("Distribution-DefaultLevelNumber") + 3;
3393 // For each component
3394 for(UnsignedInteger component = 0; component < dimension_; ++component)
3395 {
3396 Scalar h = 0.5;
3397 UnsignedInteger N = 6;
3398 const Implementation marginalDistribution(getMarginal(component).getImplementation());
3399 const Scalar shiftComponent = shift[component];
3400 // Central term
3401 moment[component] = h * 0.5 * std::pow(marginalDistribution->computeQuantile(0.5)[0], static_cast<int>(n));
3402 // First block
3403 for (UnsignedInteger j = 1; j <= N; ++j)
3404 {
3405 const Scalar hj = h * j;
3406 const Scalar expHj = std::exp(hj);
3407 const Scalar iexpHj = 1.0 / expHj;
3408 const Scalar sinhHj = 0.5 * (expHj - iexpHj);
3409 const Scalar expSinhHj = std::exp(sinhHj);
3410 const Scalar iexpSinhHj = 1.0 / expSinhHj;
3411 const Scalar iTwoCoshSinhHj = 1.0 / (expSinhHj + iexpSinhHj);
3412 const Scalar xjm = iexpSinhHj * iTwoCoshSinhHj;
3413 const Scalar xjp = expSinhHj * iTwoCoshSinhHj;
3414 const Scalar wj = (expHj + iexpHj) * iTwoCoshSinhHj * iTwoCoshSinhHj;
3415 moment[component] += h * wj * (std::pow(marginalDistribution->computeQuantile(xjm)[0] - shiftComponent, static_cast<int>(n)) + std::pow(marginalDistribution->computeQuantile(xjp)[0] - shiftComponent, static_cast<int>(n)));
3416 } // End of first block
3417 //values[0] = moment[component];
3418 // Sequential addition of half-blocks
3419 Scalar error = 1.0;
3420 UnsignedInteger level = 0;
3421 while( (error > epsilon) && (level < MaximumLevel))
3422 {
3423 ++level;
3424 h *= 0.5;
3425 moment[component] *= 0.5;
3426 Scalar delta = 0.0;
3427 for (UnsignedInteger j = 0; j <= N; ++j)
3428 {
3429 const Scalar hj = h * (2 * j + 1);
3430 const Scalar expHj = std::exp(hj);
3431 const Scalar iexpHj = 1.0 / expHj;
3432 const Scalar sinhHj = 0.5 * (expHj - iexpHj);
3433 const Scalar expSinhHj = std::exp(sinhHj);
3434 const Scalar iexpSinhHj = 1.0 / expSinhHj;
3435 const Scalar iTwoCoshSinhHj = 1.0 / (expSinhHj + iexpSinhHj);
3436 const Scalar xjm = iexpSinhHj * iTwoCoshSinhHj;
3437 const Scalar xjp = expSinhHj * iTwoCoshSinhHj;
3438 Scalar wj = (expHj + iexpHj) * iTwoCoshSinhHj * iTwoCoshSinhHj;
3439 delta += h * wj * (std::pow(marginalDistribution->computeQuantile(xjm)[0] - shiftComponent, static_cast<int>(n)) + std::pow(marginalDistribution->computeQuantile(xjp)[0] - shiftComponent, static_cast<int>(n)));
3440 }
3441 error = std::abs((delta - moment[component]) / (1.0 + std::abs(delta)));
3442 moment[component] += delta;
3443 N *= 2;
3444 } // End of half-block
3445 } // End of each component
3446 return moment;
3447 }
3448
3449 /* Check if the distribution is elliptical */
3450 Bool DistributionImplementation::isElliptical() const
3451 {
3452 return false;
3453 }
3454
3455 /* Check if the distribution is a copula */
3456 Bool DistributionImplementation::isCopula() const
3457 {
3458 return isCopula_;
3459 }
3460
3461 /* Check if the distribution is continuous */
3462 Bool DistributionImplementation::isContinuous() const
3463 {
3464 return true;
3465 }
3466
3467 /* Check if the distribution is discrete */
3468 Bool DistributionImplementation::isDiscrete() const
3469 {
3470 return false;
3471 }
3472
3473 /* Tell if the distribution is integer valued */
3474 Bool DistributionImplementation::isIntegral() const
3475 {
3476 return false;
3477 }
3478
3479 /* Tell if the distribution has elliptical copula */
3480 Bool DistributionImplementation::hasEllipticalCopula() const
3481 {
3482 return dimension_ == 1;
3483 }
3484
3485 /* Tell if the distribution has independent copula */
3486 Bool DistributionImplementation::hasIndependentCopula() const
3487 {
3488 return dimension_ == 1;
3489 }
3490
3491 /* Get the support of a distribution that intersect a given interval */
3492 Sample DistributionImplementation::getSupport(const Interval & ) const
3493 {
3494 throw NotYetImplementedException(HERE) << "In DistributionImplementation::getSupport(const Interval & interval) const";
3495 }
3496
3497 /* Get the support on the whole range */
3498 Sample DistributionImplementation::getSupport() const
3499 {
3500 return getSupport(getRange());
3501 }
3502
3503 /* Get the discrete probability levels */
3504 Point DistributionImplementation::getProbabilities() const
3505 {
3506 if (!isDiscrete())
3507 throw InternalException(HERE) << "Error: cannot return probability levels of a non discrete distribution.";
3508
3509 return computePDF(getSupport()).getImplementation()->getData();
3510 }
3511
3512 /* Get the PDF singularities inside of the range - 1D only */
3513 Point DistributionImplementation::getSingularities() const
3514 {
3515 if (dimension_ != 1) throw NotDefinedException(HERE) << "Error: cannot ask for PDF singularities for multivariate distributions.";
3516 return Point(0);
3517 }
3518
3519 /* Compute the density generator of an elliptical distribution, i.e.
3520 * the function phi such that the density of the distribution can
3521 * be written as p(x) = phi(t(x-mu)R(x-mu))
3522 */
3523 Scalar DistributionImplementation::computeDensityGenerator(const Scalar ) const
3524 {
3525 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeDensityGenerator(const Scalar betaSquare) const";
3526 }
3527
3528 /* Compute the derivative of the density generator */
3529 Scalar DistributionImplementation::computeDensityGeneratorDerivative(const Scalar ) const
3530 {
3531 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeDensityGeneratorDerivative(const Scalar betaSquare) const";
3532 }
3533
3534 /* Compute the second derivative of the density generator */
3535 Scalar DistributionImplementation::computeDensityGeneratorSecondDerivative(const Scalar ) const
3536 {
3537 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeDensityGeneratorSecondDerivative(const Scalar betaSquare) const";
3538 }
3539
3540 /* Get the i-th marginal distribution */
3541 Distribution DistributionImplementation::getMarginal(const UnsignedInteger i) const
3542 {
3543 if (isCopula())
3544 return new IndependentCopula(1);
3545 return getMarginal(Indices(1, i));
3546 }
3547
3548 /* Get the distribution of the marginal distribution corresponding to indices dimensions */
3549 Distribution DistributionImplementation::getMarginal(const Indices & indices) const
3550 {
3551 if (!indices.check(dimension_)) throw InvalidArgumentException(HERE) << "Marginal indices cannot exceed dimension";
3552 Indices full(dimension_);
3553 full.fill();
3554 if (indices == full) return clone();
3555 if (isCopula() && (indices.getSize() == 1)) return new Uniform(0.0, 1.0);
3556 return new MarginalDistribution(*this, indices);
3557 }
3558
3559 /* Get the copula of a distribution */
3560 Distribution DistributionImplementation::getCopula() const
3561 {
3562 if (dimension_ == 1) return new IndependentCopula(1);
3563 if (isCopula()) return clone();
3564 return new SklarCopula(*this);
3565 }
3566
3567 /* Get the isoprobabilistic transformation */
3568 DistributionImplementation::IsoProbabilisticTransformation DistributionImplementation::getIsoProbabilisticTransformation() const
3569 {
3570 // Special case for dimension 1
3571 if (dimension_ == 1)
3572 {
3573 DistributionCollection collection(1, *this);
3574 // Get the marginal transformation evaluation implementation
3575 const MarginalTransformationEvaluation evaluation(collection, MarginalTransformationEvaluation::FROM, Normal());
3576 // Get the marginal transformation gradient implementation
3577 const Gradient gradient(new MarginalTransformationGradient(evaluation));
3578 // Get the marginal transformation hessian implementation
3579 const Hessian hessian(new MarginalTransformationHessian(evaluation));
3580 InverseIsoProbabilisticTransformation inverseTransformation(evaluation, gradient, hessian);
3581 PointWithDescription parameters(getParameter());
3582 const UnsignedInteger parametersDimension = parameters.getDimension();
3583 Description parametersDescription(parameters.getDescription());
3584 const String name(parameters.getName());
3585 for (UnsignedInteger i = 0; i < parametersDimension; i++) parametersDescription[i] = OSS() << name << "_" << parametersDescription[i];
3586 parameters.setDescription(parametersDescription);
3587 inverseTransformation.setParameter(parameters);
3588 return inverseTransformation;
3589 }
3590 // General case, Rosenblatt transformation
3591 return FunctionImplementation(new RosenblattEvaluation(clone()));
3592 }
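/* Usage sketch of the isoprobabilistic transformation, assuming the public
   API: in dimension 1 it is the marginal transformation T = Phi^{-1} o F
   mapping the distribution to the standard normal, so T applied to the median
   should give a value close to 0:

     #include <iostream>
     #include "openturns/OT.hxx"
     using namespace OT;

     int main()
     {
       const LogNormal dist(0.0, 1.0);
       const Function transformation(dist.getIsoProbabilisticTransformation());
       const Point median(dist.computeQuantile(0.5));
       std::cout << transformation(median) << std::endl;  // expected close to 0
       return 0;
     }
*/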
3593
3594 /* Get the inverse isoprobabilistic transformation */
3595 DistributionImplementation::InverseIsoProbabilisticTransformation DistributionImplementation::getInverseIsoProbabilisticTransformation() const
3596 {
3597 // Special case for dimension 1
3598 if (dimension_ == 1)
3599 {
3600 DistributionCollection collection(1, *this);
3601 // Get the marginal transformation evaluation implementation
3602 MarginalTransformationEvaluation evaluation(collection, MarginalTransformationEvaluation::TO, Normal());
3603 // Get the marginal transformation gradient implementation
3604 const Gradient gradient(new MarginalTransformationGradient(evaluation));
3605 // Get the marginal transformation hessian implementation
3606 const Hessian hessian(new MarginalTransformationHessian(evaluation));
3607 InverseIsoProbabilisticTransformation inverseTransformation(evaluation, gradient, hessian);
3608 PointWithDescription parameters(getParameter());
3609 const UnsignedInteger parametersDimension = parameters.getDimension();
3610 Description parametersDescription(parameters.getDescription());
3611 const String name(parameters.getName());
3612 for (UnsignedInteger i = 0; i < parametersDimension; i++) parametersDescription[i] = OSS() << name << "_" << parametersDescription[i];
3613 parameters.setDescription(parametersDescription);
3614 inverseTransformation.setParameter(parameters);
3615 return inverseTransformation;
3616 }
3617 // General case, inverse Rosenblatt transformation
3618 return FunctionImplementation(new InverseRosenblattEvaluation(clone()));
3619 }
3620
3621 /* Compute the standard distribution */
3622 void DistributionImplementation::computeStandardDistribution() const
3623 {
3624 Normal standardDistribution(dimension_);
3625 standardDistribution.setDescription(getDescription());
3626 p_standardDistribution_ = standardDistribution.clone();
3627 isAlreadyComputedStandardDistribution_ = true;
3628 }
3629
3630 /* Get the standard distribution */
3631 Distribution DistributionImplementation::getStandardDistribution() const
3632 {
3633 if (!isAlreadyComputedStandardDistribution_) computeStandardDistribution();
3634 return p_standardDistribution_;
3635 }
3636
3637 /* Get the standard representative in the parametric family, associated with the standard moments */
3638 Distribution DistributionImplementation::getStandardRepresentative() const
3639 {
3640 return clone();
3641 }
3642
3643 /* Compute the radial distribution CDF */
3644 Scalar DistributionImplementation::computeRadialDistributionCDF(const Scalar,
3645 const Bool ) const
3646 {
3647 throw NotYetImplementedException(HERE) << "In DistributionImplementation::computeRadialDistributionCDF(const Scalar radius, const Bool tail) const";
3648 }
3649
3650
3651 /* Draw the PDF of a discrete distribution */
3652 Graph DistributionImplementation::drawDiscretePDF(const Scalar xMin,
3653 const Scalar xMax,
3654 const Bool logScale) const
3655 {
3656 if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: cannot draw the PDF of a multidimensional discrete distribution this way.";
3657 if (xMax < xMin - ResourceMap::GetAsScalar("DiscreteDistribution-SupportEpsilon")) throw InvalidArgumentException(HERE) << "Error: cannot draw a PDF with xMax < xMin, here xmin=" << xMin << " and xmax=" << xMax;
3658 const String title(OSS() << getDescription()[0] << " PDF");
3659 const Sample support(getSupport(Interval(xMin, xMax)));
3660 // First the vertical bars
3661 const String xName(getDescription()[0]);
3662 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>(logScale ? 1 : 0);
3663 Graph graphPDF(title, xName, "PDF", true, "topright", 1, scale);
3664 Point point(2);
3665 point[0] = xMin - ResourceMap::GetAsScalar("DiscreteDistribution-SupportEpsilon");
3666 const Sample gridY(computePDF(support));
3667
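// Build the polyline (xMin - eps, 0), then for each support point x the triple (x, 0) -> (x, PDF(x)) -> (x, 0), so the curve renders as vertical bars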
3668 Sample data(0, 2);
3669 data.add(point);
3670 for (UnsignedInteger i = 0; i < support.getSize(); ++i)
3671 {
3672 point[0] = support(i, 0);
3673 data.add(point);
3674 point[1] = gridY(i, 0);
3675 data.add(point);
3676 point[1] = 0.0;
3677 data.add(point);
3678 }
3679 point[0] = xMax + ResourceMap::GetAsScalar("DiscreteDistribution-SupportEpsilon");
3680 point[1] = 0.0;
3681 data.add(point);
3682 graphPDF.add(Curve(data, "red", "solid", 2, title));
3683 return graphPDF;
3684 }
3685
3686 /* Draw the PDF of the distribution when its dimension is 1 */
3687 Graph DistributionImplementation::drawPDF(const Scalar xMin,
3688 const Scalar xMax,
3689 const UnsignedInteger pointNumber,
3690 const Bool logScale) const
3691 {
3692 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "Error: can draw a PDF only if dimension equals 1, here dimension=" << dimension_;
3693 if (xMax <= xMin) throw InvalidArgumentException(HERE) << "Error: cannot draw a PDF with xMax <= xMin, here xmin=" << xMin << " and xmax=" << xMax;
3694 if (pointNumber < 2) throw InvalidArgumentException(HERE) << "Error: cannot draw a PDF with a point number < 2";
3695 if (isDiscrete()) return drawDiscretePDF(xMin, xMax, logScale);
3696 // Discretization of the x axis
3697 const PDFWrapper pdfWrapper(this);
3698 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>(logScale ? 1 : 0);
3699 Graph graphPDF(pdfWrapper.draw(xMin, xMax, pointNumber, scale));
3700 Drawable drawable(graphPDF.getDrawable(0));
3701 const String title(OSS() << getDescription()[0] << " PDF");
3702 drawable.setColor("red");
3703 drawable.setLegend(title);
3704 drawable.setLineStyle("solid");
3705 drawable.setLineWidth(2);
3706 graphPDF.setDrawable(drawable, 0);
3707 graphPDF.setXTitle(getDescription()[0]);
3708 graphPDF.setYTitle("PDF");
3709 graphPDF.setTitle("");
3710 graphPDF.setLegendPosition("topright");
3711 return graphPDF;
3712 }
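/* Usage sketch for the method above (illustrative only, not part of the library):
     Normal d;                                   // any 1D distribution would do
     Graph g(d.drawPDF(-3.0, 3.0, 129, false));  // PDF over [-3, 3], linear scale
   The returned Graph holds a single red Curve whose legend is "<description> PDF". */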
3713
3714 /* Draw the PDF of the distribution when its dimension is 1 */
3715 Graph DistributionImplementation::drawPDF(const UnsignedInteger pointNumber,
3716 const Bool logScale) const
3717 {
3718 if (dimension_ == 2) return drawPDF(Indices(2, pointNumber), logScale, logScale);
3719 if (dimension_ != 1) throw InvalidArgumentException(HERE) << "Error: this method is available only for 1D or 2D distributions";
3720 // For discrete distributions, use the numerical range to define the drawing range
3721 const Scalar xMin = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
3722 const Scalar xMax = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
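// Enlarge the quantile-based range [xMin, xMax] on both sides by a margin proportional to its width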
3723 const Scalar delta = 2.0 * (xMax - xMin) * (1.0 - 0.5 * (ResourceMap::GetAsScalar("Distribution-QMax" ) - ResourceMap::GetAsScalar("Distribution-QMin")));
3724 if (isDiscrete())
3725 {
3726 Scalar a = std::max(xMin - delta, getRange().getLowerBound()[0] - 1.0);
3727 Scalar b = std::min(xMax + delta, getRange().getUpperBound()[0] + 1.0);
3728 if (b <= a)
3729 {
3730 a -= 1.0;
3731 b += 1.0;
3732 }
3733 return drawDiscretePDF(a, b, logScale);
3734 }
3735 return drawPDF(xMin - delta, xMax + delta, pointNumber, logScale);
3736 }
3737
3738 /* Draw the PDF of a 1D marginal */
3739 Graph DistributionImplementation::drawMarginal1DPDF(const UnsignedInteger marginalIndex,
3740 const Scalar xMin,
3741 const Scalar xMax,
3742 const UnsignedInteger pointNumber,
3743 const Bool logScale) const
3744 {
3745 Graph marginalGraph(getMarginal(marginalIndex).drawPDF(xMin, xMax, pointNumber, logScale));
3746 marginalGraph.setTitle(OSS() << getDescription() << "->" << description_[marginalIndex] << " component PDF");
3747 return marginalGraph;
3748 }
3749
3750 /* Draw the PDF of the distribution when its dimension is 2 */
3751 Graph DistributionImplementation::drawPDF(const Point & xMin,
3752 const Point & xMax,
3753 const Indices & pointNumber,
3754 const Bool logScaleX,
3755 const Bool logScaleY) const
3756 {
3757 if (dimension_ == 1) return drawPDF(xMin[0], xMax[0], pointNumber[0], logScaleX);
3758 if (!(pointNumber[0] >= 2 && pointNumber[1] >= 2)) throw InvalidArgumentException(HERE) << "Error: the discretization must have at least 2 points per component";
3759 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>((logScaleX ? 1 : 0) + (logScaleY ? 2 : 0));
3760 if (isContinuous())
3761 {
3762 Graph graph(PDFWrapper(this).draw(xMin, xMax, pointNumber, scale));
3763 graph.setXTitle(description_[0]);
3764 graph.setYTitle(description_[1]);
3765 graph.setTitle(String(OSS() << getDescription() << " iso-PDF"));
3766 return graph;
3767 }
3768 if (isDiscrete())
3769 {
3770 const Sample support(getSupport());
3771 const Point probabilities(getProbabilities());
3772 const UnsignedInteger size = support.getSize();
3773 SampleImplementation fullProba(size, 1);
3774 fullProba.setData(probabilities);
3775 fullProba.stack(*support.getImplementation());
3776 fullProba.sortAccordingToAComponentInPlace(0);
3777 const Scalar pMin = fullProba(0, 0);
3778 const Scalar pMax = fullProba(size - 1, 0);
3779 const Scalar scaling = ResourceMap::GetAsScalar("Distribution-DiscreteDrawPDFScaling") / std::sqrt(pMax);
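// Each support point is drawn as a square marker whose half-width eta is proportional to sqrt(p), so the marker area is proportional to the probability; the factor is normalized by the largest probability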
3780 const String xName(description_[0]);
3781 const String yName(description_[1]);
3782 const String title(OSS() << getDescription() << " PDF");
3783 Graph graph(title, xName, yName, true, "topright", 1, scale);
3784 const Bool scaleColors = ResourceMap::GetAsBool("Distribution-ScaleColorsDiscretePDF") && (pMin < pMax);
3785 for (UnsignedInteger i = 0; i < size; ++i)
3786 {
3787 const Scalar x = fullProba(i, 1);
3788 const Scalar y = fullProba(i, 2);
3789 const Scalar p = fullProba(i, 0);
3790 const Scalar eta = std::sqrt(p) * scaling;
3791 Sample square(4, 2);
3792 if (logScaleX)
3793 {
3794 square(0, 0) = x * (1.0 - eta);
3795 square(1, 0) = x * (1.0 - eta);
3796 square(2, 0) = x * (1.0 + eta);
3797 square(3, 0) = x * (1.0 + eta);
3798 }
3799 else
3800 {
3801 square(0, 0) = x - eta;
3802 square(1, 0) = x - eta;
3803 square(2, 0) = x + eta;
3804 square(3, 0) = x + eta;
3805 }
3806 if (logScaleY)
3807 {
3808 square(0, 1) = y * (1.0 - eta);
3809 square(1, 1) = y * (1.0 + eta);
3810 square(2, 1) = y * (1.0 + eta);
3811 square(3, 1) = y * (1.0 - eta);
3812 }
3813 else
3814 {
3815 square(0, 1) = y - eta;
3816 square(1, 1) = y + eta;
3817 square(2, 1) = y + eta;
3818 square(3, 1) = y - eta;
3819 }
3820 Polygon mark(square);
3821 const Scalar rho = (scaleColors ? (1.0 - 1.0 / size) * (p - pMin) / (pMax - pMin) : p);
3822 mark.setColor(Polygon::ConvertFromHSV(360.0 * rho, 1.0, 1.0));
3823 mark.setEdgeColor(Polygon::ConvertFromHSV(360.0 * rho, 1.0, 0.9));
3824 graph.add(mark);
3825 }
3826 if (ResourceMap::GetAsBool("Distribution-ShowSupportDiscretePDF"))
3827 {
3828 Cloud cloud(support);
3829 cloud.setColor("red");
3830 graph.add(cloud);
3831 }
3832 return graph;
3833 }
3834 throw NotYetImplementedException(HERE) << "Error: the drawPDF() method is defined only for continuous or discrete distributions.";
3835 }
3836
3837 /* Draw the PDF of the distribution when its dimension is 2 */
3838 Graph DistributionImplementation::drawPDF(const Point & xMin,
3839 const Point & xMax,
3840 const Bool logScaleX,
3841 const Bool logScaleY) const
3842 {
3843 return drawPDF(xMin, xMax, Indices(2, ResourceMap::GetAsUnsignedInteger("Distribution-DefaultPointNumber")), logScaleX, logScaleY);
3844 }
3845
3846 /* Draw the PDF of the distribution when its dimension is 2 */
3847 Graph DistributionImplementation::drawPDF(const Indices & pointNumber,
3848 const Bool logScaleX,
3849 const Bool logScaleY) const
3850 {
3851 if (pointNumber.getSize() != 2) throw InvalidArgumentException(HERE) << "Error: pointNumber must be of size 2, here size=" << pointNumber.getSize();
3852 Point xMin(2);
3853 if (isCopula()) xMin = Point(2, 0.0);
3854 else
3855 {
3856 xMin[0] = getMarginal(0).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
3857 xMin[1] = getMarginal(1).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
3858 }
3859 Point xMax(2);
3860 if (isCopula()) xMax = Point(2, 1.0);
3861 else
3862 {
3863 xMax[0] = getMarginal(0).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
3864 xMax[1] = getMarginal(1).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
3865 }
3866 Point delta(2, 0.0);
3867 if (!isCopula()) delta = (2.0 * (xMax - xMin) * (1.0 - 0.5 * (ResourceMap::GetAsScalar("Distribution-QMax" ) - ResourceMap::GetAsScalar("Distribution-QMin"))));
3868 const Interval intersection(getRange().intersect(Interval(xMin - delta, xMax + delta)));
3869 Graph graph(drawPDF(intersection.getLowerBound(), intersection.getUpperBound(), pointNumber, logScaleX, logScaleY));
3870 // Add a border for a copula
3871 if (isCopula())
3872 {
3873 const Drawable drawable(graph.getDrawable(0));
3874 Sample data(5, 2);
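// Outline of the unit square [0, 1]^2: the Sample is zero-initialized, so only the nonzero coordinates of the corners (1,0), (1,1) and (0,1) need to be set; the last point stays at (0,0) and closes the loop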
3875 data(1, 0) = 1.0;
3876 data[2] = Point(2, 1.0);
3877 data(3, 1) = 1.0;
3878 Curve square(data);
3879 square.setColor("blue");
3880 graph.setDrawable(square, 0);
3881 graph.add(drawable);
3882 }
3883 return graph;
3884 }
3885
3886 /* Draw the PDF of a 2D marginal */
3887 Graph DistributionImplementation::drawMarginal2DPDF(const UnsignedInteger firstMarginal,
3888 const UnsignedInteger secondMarginal,
3889 const Point & xMin,
3890 const Point & xMax,
3891 const Indices & pointNumber,
3892 const Bool logScaleX,
3893 const Bool logScaleY) const
3894 {
3895 Indices indices = {firstMarginal, secondMarginal};
3896 Graph marginalGraph(getMarginal(indices).drawPDF(xMin, xMax, pointNumber, logScaleX, logScaleY));
3897 marginalGraph.setTitle(OSS() << getDescription() << "->[" << description_[firstMarginal] << ", " << description_[secondMarginal] << "] components iso-PDF");
3898 return marginalGraph;
3899 }
3900
3901 /* Draw the log-PDF of a discrete distribution */
3902 Graph DistributionImplementation::drawDiscreteLogPDF(const Scalar xMin,
3903 const Scalar xMax,
3904 const Bool logScale) const
3905 {
3906 if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: cannot draw the log-PDF of a multidimensional discrete distribution this way.";
3907 if (xMax < xMin - ResourceMap::GetAsScalar("DiscreteDistribution-SupportEpsilon")) throw InvalidArgumentException(HERE) << "Error: cannot draw a PDF with xMax < xMin, here xmin=" << xMin << " and xmax=" << xMax;
3908 const String title(OSS() << getDescription()[0] << " log PDF");
3909 const Sample support(getSupport(Interval(xMin, xMax)));
3910 // First the vertical bars
3911 const String xName(getDescription()[0]);
3912 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>(logScale ? 1 : 0);
3913 Graph graphLogPDF(title, xName, "log PDF", true, "topright", 1, scale);
3914 Point point(2);
3915 point[0] = xMin - ResourceMap::GetAsScalar("DiscreteDistribution-SupportEpsilon");
3916 const Sample gridY(computeLogPDF(support));
3917
3918 Sample data(0, 2);
3919 data.add(point);
3920 for (UnsignedInteger i = 0; i < support.getSize(); ++i)
3921 {
3922 point[0] = support(i, 0);
3923 data.add(point);
3924 point[1] = gridY(i, 0);
3925 data.add(point);
3926 point[1] = 0.0;
3927 data.add(point);
3928 }
3929 point[0] = xMax + ResourceMap::GetAsScalar("DiscreteDistribution-SupportEpsilon");
3930 point[1] = 0.0;
3931 data.add(point);
3932 graphLogPDF.add(Curve(data, "red", "solid", 2, title));
3933 return graphLogPDF;
3934 }
3935
3936 /* Draw the log-PDF of the distribution when its dimension is 1 */
3937 Graph DistributionImplementation::drawLogPDF(const Scalar xMin,
3938 const Scalar xMax,
3939 const UnsignedInteger pointNumber,
3940 const Bool logScale) const
3941 {
3942 if (dimension_ == 2) return drawLogPDF(Indices(2, pointNumber), logScale, logScale);
3943 if (dimension_ != 1) throw InvalidArgumentException(HERE) << "Error: this method is available only for 1D or 2D distributions";
3944 if (xMax <= xMin) throw InvalidArgumentException(HERE) << "Error: cannot draw a logPDF with xMax <= xMin, here xmin=" << xMin << " and xmax=" << xMax;
3945 if (pointNumber < 2) throw InvalidArgumentException(HERE) << "Error: cannot draw a logPDF with a point number < 2";
3946 if (isDiscrete()) return drawDiscreteLogPDF(xMin, xMax, logScale);
3947 // Discretization of the x axis
3948 const LogPDFWrapper logPdfWrapper(this);
3949 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>(logScale ? 1 : 0);
3950 Graph graphLogPDF(logPdfWrapper.draw(xMin, xMax, pointNumber, scale));
3951 Drawable drawable(graphLogPDF.getDrawable(0));
3952 const String title(OSS() << getDescription()[0] << " log PDF");
3953 drawable.setColor("red");
3954 drawable.setLegend(title);
3955 drawable.setLineStyle("solid");
3956 drawable.setLineWidth(2);
3957 graphLogPDF.setDrawable(drawable, 0);
3958 graphLogPDF.setXTitle(getDescription()[0]);
3959 graphLogPDF.setYTitle("log PDF");
3960 graphLogPDF.setTitle("");
3961 graphLogPDF.setLegendPosition("topright");
3962 return graphLogPDF;
3963 }
3964
3965 /* Draw the log-PDF of the distribution when its dimension is 1 */
3966 Graph DistributionImplementation::drawLogPDF(const UnsignedInteger pointNumber,
3967 const Bool logScale) const
3968 {
3969 if (dimension_ == 2) return drawLogPDF(Indices(2, pointNumber), logScale, logScale);
3970 if (dimension_ != 1) throw InvalidArgumentException(HERE) << "Error: this method is available only for 1D or 2D distributions";
3971 // For discrete distributions, use the numerical range to define the drawing range
3972 const Scalar xMin = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
3973 const Scalar xMax = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
3974 const Scalar delta = 2.0 * (xMax - xMin) * (1.0 - 0.5 * (ResourceMap::GetAsScalar("Distribution-QMax" ) - ResourceMap::GetAsScalar("Distribution-QMin")));
3975 if (isDiscrete())
3976 {
3977 Scalar a = std::max(xMin - delta, getRange().getLowerBound()[0] - 1.0);
3978 Scalar b = std::min(xMax + delta, getRange().getUpperBound()[0] + 1.0);
3979 if (b <= a)
3980 {
3981 a -= 1.0;
3982 b += 1.0;
3983 }
3984 return drawDiscreteLogPDF(a, b, logScale);
3985 }
3986 return drawLogPDF(xMin - delta, xMax + delta, pointNumber, logScale);
3987 }
3988
3989 /* Draw the log-PDF of a 1D marginal */
3990 Graph DistributionImplementation::drawMarginal1DLogPDF(const UnsignedInteger marginalIndex,
3991 const Scalar xMin,
3992 const Scalar xMax,
3993 const UnsignedInteger pointNumber,
3994 const Bool logScale) const
3995 {
3996 Graph marginalGraph(getMarginal(marginalIndex).drawLogPDF(xMin, xMax, pointNumber, logScale));
3997 marginalGraph.setTitle(OSS() << getDescription() << "->" << description_[marginalIndex] << " component log PDF");
3998 return marginalGraph;
3999 }
4000
4001 /* Draw the log-PDF of the distribution when its dimension is 2 */
4002 Graph DistributionImplementation::drawLogPDF(const Point & xMin,
4003 const Point & xMax,
4004 const Indices & pointNumber,
4005 const Bool logScaleX,
4006 const Bool logScaleY) const
4007 {
4008 if (dimension_ == 1) return drawLogPDF(xMin[0], xMax[0], pointNumber[0], logScaleX);
4009 if (!(pointNumber[0] >= 2 && pointNumber[1] >= 2)) throw InvalidArgumentException(HERE) << "Error: the discretization must have at least 2 points per component";
4010 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>((logScaleX ? 1 : 0) + (logScaleY ? 2 : 0));
4011 if (isContinuous())
4012 {
4013 Graph graph(LogPDFWrapper(this).draw(xMin, xMax, pointNumber, scale));
4014 graph.setXTitle(description_[0]);
4015 graph.setYTitle(description_[1]);
4016 graph.setTitle(String(OSS() << getDescription() << " iso-LogPDF"));
4017 return graph;
4018 }
4019 if (isDiscrete())
4020 {
4021 const Sample support(getSupport());
4022 const Point probabilities(getProbabilities());
4023 const UnsignedInteger size = support.getSize();
4024 SampleImplementation fullProba(size, 1);
4025 fullProba.setData(probabilities);
4026 fullProba.stack(*support.getImplementation());
4027 fullProba.sortAccordingToAComponentInPlace(0);
4028 const Scalar absLogPMin = -std::log(fullProba(0, 0));
4029 const Scalar absLogPMax = -std::log(fullProba(size - 1, 0));
4030 const Scalar scaling = ResourceMap::GetAsScalar("Distribution-DiscreteDrawPDFScaling") / std::sqrt(absLogPMin);
4031 const String xName(description_[0]);
4032 const String yName(description_[1]);
4033 const String title(OSS() << getDescription() << " log PDF");
4034 Graph graph(title, xName, yName, true, "topright", 1, scale);
4035 const Bool scaleColors = ResourceMap::GetAsBool("Distribution-ScaleColorsDiscretePDF") && (absLogPMin < absLogPMax);
4036 for (UnsignedInteger i = 0; i < size; ++i)
4037 {
4038 const Scalar x = fullProba(i, 1);
4039 const Scalar y = fullProba(i, 2);
4040 const Scalar absLogP = -std::log(fullProba(i, 0));
4041 const Scalar eta = scaling * std::sqrt(absLogP);
4042 Sample square(4, 2);
4043 if (logScaleX)
4044 {
4045 square(0, 0) = x * (1.0 - eta);
4046 square(1, 0) = x * (1.0 - eta);
4047 square(2, 0) = x * (1.0 + eta);
4048 square(3, 0) = x * (1.0 + eta);
4049 }
4050 else
4051 {
4052 square(0, 0) = x - eta;
4053 square(1, 0) = x - eta;
4054 square(2, 0) = x + eta;
4055 square(3, 0) = x + eta;
4056 }
4057 if (logScaleY)
4058 {
4059 square(0, 1) = y * (1.0 - eta);
4060 square(1, 1) = y * (1.0 + eta);
4061 square(2, 1) = y * (1.0 + eta);
4062 square(3, 1) = y * (1.0 - eta);
4063 }
4064 else
4065 {
4066 square(0, 1) = y - eta;
4067 square(1, 1) = y + eta;
4068 square(2, 1) = y + eta;
4069 square(3, 1) = y - eta;
4070 }
4071 Polygon mark(square);
4072 const Scalar rho = (scaleColors ? (1.0 - 1.0 / size) * (absLogP - absLogPMin) / (absLogPMax - absLogPMin) : absLogP);
4073 mark.setColor(Polygon::ConvertFromHSV(360.0 * rho, 1.0, 1.0));
4074 mark.setEdgeColor(Polygon::ConvertFromHSV(360.0 * rho, 1.0, 0.9));
4075 graph.add(mark);
4076 }
4077 if (ResourceMap::GetAsBool("Distribution-ShowSupportDiscretePDF"))
4078 {
4079 Cloud cloud(support);
4080 cloud.setColor("red");
4081 graph.add(cloud);
4082 }
4083 return graph;
4084 }
4085 throw NotYetImplementedException(HERE) << "Error: the drawLogPDF() method is defined only for continuous or discrete distributions.";
4086 }
4087
4088 /* Draw the log-PDF of the distribution when its dimension is 2 */
4089 Graph DistributionImplementation::drawLogPDF(const Point & xMin,
4090 const Point & xMax,
4091 const Bool logScaleX,
4092 const Bool logScaleY) const
4093 {
4094 return drawLogPDF(xMin, xMax, Indices(2, ResourceMap::GetAsUnsignedInteger("Distribution-DefaultPointNumber")), logScaleX, logScaleY);
4095 }
4096
4097 /* Draw the log-PDF of the distribution when its dimension is 2 */
4098 Graph DistributionImplementation::drawLogPDF(const Indices & pointNumber,
4099 const Bool logScaleX,
4100 const Bool logScaleY) const
4101 {
4102 if (pointNumber.getSize() != 2) throw InvalidArgumentException(HERE) << "Error: pointNumber must be of size 2, here size=" << pointNumber.getSize();
4103 Point xMin(2);
4104 if (isCopula()) xMin = Point(2, 0.0);
4105 else
4106 {
4107 xMin[0] = getMarginal(0).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
4108 xMin[1] = getMarginal(1).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
4109 }
4110 Point xMax(2);
4111 if (isCopula()) xMax = Point(2, 1.0);
4112 else
4113 {
4114 xMax[0] = getMarginal(0).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
4115 xMax[1] = getMarginal(1).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
4116 }
4117 Point delta(2, 0.0);
4118 if (!isCopula()) delta = (2.0 * (xMax - xMin) * (1.0 - 0.5 * (ResourceMap::GetAsScalar("Distribution-QMax" ) - ResourceMap::GetAsScalar("Distribution-QMin"))));
4119 const Interval intersection(getRange().intersect(Interval(xMin - delta, xMax + delta)));
4120 Graph graph(drawLogPDF(intersection.getLowerBound(), intersection.getUpperBound(), pointNumber, logScaleX, logScaleY));
4121 // Add a border for a copula
4122 if (isCopula())
4123 {
4124 const Drawable drawable(graph.getDrawable(0));
4125 Sample data(5, 2);
4126 data(0, 0) = (logScaleX ? std::log(SpecFunc::Precision) : 0.0);
4127 data(0, 1) = (logScaleY ? std::log(SpecFunc::Precision) : 0.0);
4128 data(1, 0) = 1.0;
4129 data(1, 1) = (logScaleY ? std::log(SpecFunc::Precision) : 0.0);
4130 data(2, 0) = 1.0;
4131 data(2, 1) = 1.0;
4132 data(3, 0) = (logScaleX ? std::log(SpecFunc::Precision) : 0.0);
4133 data(3, 1) = 1.0;
4134 data[4] = data[0];
4135 Curve square(data);
4136 square.setColor("blue");
4137 graph.setDrawable(square, 0);
4138 graph.add(drawable);
4139 }
4140 return graph;
4141 }
4142
4143 /* Draw the log-PDF of a 2D marginal */
4144 Graph DistributionImplementation::drawMarginal2DLogPDF(const UnsignedInteger firstMarginal,
4145 const UnsignedInteger secondMarginal,
4146 const Point & xMin,
4147 const Point & xMax,
4148 const Indices & pointNumber,
4149 const Bool logScaleX,
4150 const Bool logScaleY) const
4151 {
4152 Indices indices = {firstMarginal, secondMarginal};
4153 Graph marginalGraph(getMarginal(indices).drawLogPDF(xMin, xMax, pointNumber, logScaleX, logScaleY));
4154 marginalGraph.setTitle(OSS() << getDescription() << "->[" << description_[firstMarginal] << ", " << description_[secondMarginal] << "] components iso-log PDF");
4155 return marginalGraph;
4156 }
4157
4158 /* Draw the CDF of a discrete distribution */
4159 Graph DistributionImplementation::drawDiscreteCDF(const Scalar xMin,
4160 const Scalar xMax,
4161 const Bool logScale) const
4162 {
4163 // Value : 0 1/5 2/5 3/5 4/5 1
4164 // Data : ------+-----+---+------+----+---------
4165 // Case 1: ------------------------------[----]--
4166 // Case 2: ------------------[---]---------------
4167 // -[--]---------------------------------
4168 // Case 3: ----------[---]-----------------------
4169 // ---[-----------------------------]----
4170 // -------[-----------------]------------
4171 if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: cannot draw the CDF of a multidimensional discrete distribution this way.";
4172 if (xMax <= xMin) throw InvalidArgumentException(HERE) << "Error: cannot draw a CDF with xMax <= xMin, here xmin=" << xMin << " and xmax=" << xMax;
4173 // Create the graph that will store the staircase representing the CDF
4174 const String title(OSS() << getDescription()[0] << " CDF");
4175 const Sample support(getSupport(Interval(xMin, xMax)));
4176 const Sample gridY(computeCDF(support));
4177 const UnsignedInteger size = support.getSize();
4178 if (size == 0) throw InvalidArgumentException(HERE) << "empty range (" << xMin << ", " << xMax << ")" << ", support is (" << getSupport().getMin()[0] << ", " << getSupport().getMax()[0] << ")";
4179 const String xName(getDescription()[0]);
4180 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>(logScale ? 1 : 0);
4181 Graph graphCDF(title, xName, "CDF", true, "topleft", 1, scale);
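// The staircase data is padded with the CDF values at xMin and xMax so that the drawn curve spans the whole requested range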
4182 Sample data(size + 2, 2);
4183 data(0, 0) = xMin;
4184 data(0, 1) = computeCDF(xMin);
4185 for (UnsignedInteger i = 0; i < size; ++i)
4186 {
4187 const Scalar x = support(i, 0);
4188 data(i + 1, 0) = x;
4189 data(i + 1, 1) = gridY(i, 0);
4190 }
4191 if (support(size - 1, 0) == xMax) data[size + 1] = data[size];
4192 else
4193 {
4194 data(size + 1, 0) = xMax;
4195 data(size + 1, 1) = computeCDF(xMax);
4196 }
4197 graphCDF.add(Staircase(data, "red", "solid", 2, "s", title));
4198 return graphCDF;
4199 }
4200
4201 /* Draw the CDF of the distribution when its dimension is 1 */
4202 Graph DistributionImplementation::drawCDF(const Scalar xMin,
4203 const Scalar xMax,
4204 const UnsignedInteger pointNumber,
4205 const Bool logScale) const
4206 {
4207 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "Error: can draw a CDF only if dimension equals 1, here dimension=" << dimension_;
4208 if (xMax <= xMin) throw InvalidArgumentException(HERE) << "Error: cannot draw a CDF with xMax <= xMin, here xmin=" << xMin << " and xmax=" << xMax;
4209 if (pointNumber < 2) throw InvalidArgumentException(HERE) << "Error: cannot draw a CDF with a point number < 2";
4210 if (isDiscrete()) return drawDiscreteCDF(xMin, xMax, logScale);
4211 const CDFWrapper cdfWrapper(this);
4212 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>(logScale ? 1 : 0);
4213 Graph graphCDF(cdfWrapper.draw(xMin, xMax, pointNumber, scale));
4214 Drawable drawable(graphCDF.getDrawable(0));
4215 const String title(OSS() << getDescription()[0] << " CDF");
4216 drawable.setColor("red");
4217 drawable.setLegend(title);
4218 drawable.setLineStyle("solid");
4219 drawable.setLineWidth(2);
4220 graphCDF.setDrawable(drawable, 0);
4221 graphCDF.setXTitle(getDescription()[0]);
4222 graphCDF.setYTitle("CDF");
4223 graphCDF.setTitle("");
4224 graphCDF.setLegendPosition("topleft");
4225 return graphCDF;
4226 }
4227
4228 /* Draw the CDF of the distribution when its dimension is 1 */
4229 Graph DistributionImplementation::drawCDF(const UnsignedInteger pointNumber,
4230 const Bool logScale) const
4231 {
4232 if (dimension_ == 2) return drawCDF(Indices(2, pointNumber), logScale, logScale);
4233 if (dimension_ != 1) throw InvalidArgumentException(HERE) << "Error: this method is available only for 1D or 2D distributions";
4234 // For discrete distributions, use the numerical range to define the drawing range
4235 const Scalar xMin = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
4236 const Scalar xMax = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
4237 const Scalar delta = 2.0 * (xMax - xMin) * (1.0 - 0.5 * (ResourceMap::GetAsScalar("Distribution-QMax" ) - ResourceMap::GetAsScalar("Distribution-QMin")));
4238 if (isDiscrete())
4239 {
4240 Scalar a = std::max(xMin - delta, getRange().getLowerBound()[0] - 1.0);
4241 Scalar b = std::min(xMax + delta, getRange().getUpperBound()[0] + 1.0);
4242 if (b <= a)
4243 {
4244 a -= 1.0;
4245 b += 1.0;
4246 }
4247 return drawDiscreteCDF(a, b, logScale);
4248 }
4249 return drawCDF(xMin - delta, xMax + delta, pointNumber, logScale);
4250 }
4251
4252 /* Draw the CDF of a 1D marginal */
4253 Graph DistributionImplementation::drawMarginal1DCDF(const UnsignedInteger marginalIndex,
4254 const Scalar xMin,
4255 const Scalar xMax,
4256 const UnsignedInteger pointNumber,
4257 const Bool logScale) const
4258 {
4259 Graph marginalGraph(getMarginal(marginalIndex).drawCDF(xMin, xMax, pointNumber, logScale));
4260 marginalGraph.setTitle(OSS() << getDescription() << "->" << description_[marginalIndex] << " component CDF");
4261 return marginalGraph;
4262 }
4263
4264 /* Draw the CDF of the distribution when its dimension is 2 */
4265 Graph DistributionImplementation::drawCDF(const Point & xMin,
4266 const Point & xMax,
4267 const Indices & pointNumber,
4268 const Bool logScaleX,
4269 const Bool logScaleY) const
4270 {
4271 if (dimension_ == 1) return drawCDF(xMin[0], xMax[0], pointNumber[0], logScaleX);
4272 if (xMin.getDimension() != 2) throw InvalidArgumentException(HERE) << "Error: expected xMin to be of dimension 2, here dimension=" << xMin.getDimension();
4273 if (xMax.getDimension() != 2) throw InvalidArgumentException(HERE) << "Error: expected xMax to be of dimension 2, here dimension=" << xMax.getDimension();
4274 if (pointNumber.getSize() != 2) throw InvalidArgumentException(HERE) << "Error: expected pointNumber to be of size 2, here size=" << pointNumber.getSize();
4275 if (!(pointNumber[0] >= 2 && pointNumber[1] >= 2)) throw InvalidArgumentException(HERE) << "Error: the discretization must have at least 2 points per component";
4276 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>((logScaleX ? 1 : 0) + (logScaleY ? 2 : 0));
4277 Graph graph(CDFWrapper(this).draw(xMin, xMax, pointNumber, scale));
4278 graph.setXTitle(description_[0]);
4279 graph.setYTitle(description_[1]);
4280 graph.setTitle(String(OSS() << getDescription() << " iso-CDF"));
4281 return graph;
4282 }
4283
4284 /* Draw the CDF of the distribution when its dimension is 2 */
4285 Graph DistributionImplementation::drawCDF(const Point & xMin,
4286 const Point & xMax,
4287 const Bool logScaleX,
4288 const Bool logScaleY) const
4289 {
4290 return drawCDF(xMin, xMax, Indices(2, ResourceMap::GetAsUnsignedInteger("Distribution-DefaultPointNumber")), logScaleX, logScaleY);
4291 }
4292
4293 /* Draw the CDF of the distribution when its dimension is 2 */
4294 Graph DistributionImplementation::drawCDF(const Indices & pointNumber,
4295 const Bool logScaleX,
4296 const Bool logScaleY) const
4297 {
4298 if (pointNumber.getSize() != 2) throw InvalidArgumentException(HERE) << "Error: expected pointNumber to be of size 2, here size=" << pointNumber.getSize();
4299 Point xMin(2);
4300 if (isCopula()) xMin = Point(2, 0.0);
4301 else
4302 {
4303 xMin[0] = getMarginal(0).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
4304 xMin[1] = getMarginal(1).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
4305 }
4306 Point xMax(2);
4307 if (isCopula()) xMax = Point(2, 1.0);
4308 else
4309 {
4310 xMax[0] = getMarginal(0).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
4311 xMax[1] = getMarginal(1).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
4312 }
4313 Point delta(2, 0.0);
4314 if (!isCopula()) delta = (2.0 * (xMax - xMin) * (1.0 - 0.5 * (ResourceMap::GetAsScalar("Distribution-QMax" ) - ResourceMap::GetAsScalar("Distribution-QMin"))));
4315 return drawCDF(xMin - delta, xMax + delta, pointNumber, logScaleX, logScaleY);
4316 }
4317
4318 /* Draw the CDF of a 2D marginal */
4319 Graph DistributionImplementation::drawMarginal2DCDF(const UnsignedInteger firstMarginal,
4320 const UnsignedInteger secondMarginal,
4321 const Point & xMin,
4322 const Point & xMax,
4323 const Indices & pointNumber,
4324 const Bool logScaleX,
4325 const Bool logScaleY) const
4326 {
4327 Indices indices = {firstMarginal, secondMarginal};
4328 Graph marginalGraph(getMarginal(indices).drawCDF(xMin, xMax, pointNumber, logScaleX, logScaleY));
4329 marginalGraph.setTitle(OSS() << getDescription() << "->[" << description_[firstMarginal] << ", " << description_[secondMarginal] << "] components iso-CDF");
4330 return marginalGraph;
4331 }
4332
4333 /* Draw the SurvivalFunction of a discrete distribution */
4334 Graph DistributionImplementation::drawDiscreteSurvivalFunction(const Scalar xMin,
4335 const Scalar xMax,
4336 const Bool logScale) const
4337 {
4338 // Value : 0 1/5 2/5 3/5 4/5 1
4339 // Data : ------+-----+---+------+----+---------
4340 // Case 1: ------------------------------[----]--
4341 // Case 2: ------------------[---]---------------
4342 // -[--]---------------------------------
4343 // Case 3: ----------[---]-----------------------
4344 // ---[-----------------------------]----
4345 // -------[-----------------]------------
4346 if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: cannot draw the SurvivalFunction of a multidimensional discrete distribution this way.";
4347 if (xMax <= xMin) throw InvalidArgumentException(HERE) << "Error: cannot draw a SurvivalFunction with xMax <= xMin, here xmin=" << xMin << " and xmax=" << xMax;
4348 // Create the graph that will store the staircase representing the SurvivalFunction
4349 const String title(OSS() << getDescription()[0] << " SurvivalFunction");
4350 const Sample support(getSupport(Interval(xMin, xMax)));
4351 const Sample gridY(computeSurvivalFunction(support));
4352 const UnsignedInteger size = support.getSize();
4353 if (size == 0) throw InvalidArgumentException(HERE) << "empty range (" << xMin << ", " << xMax << ")" << ", support is (" << getSupport().getMin()[0] << ", " << getSupport().getMax()[0] << ")";
4354 const String xName(getDescription()[0]);
4355 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>(logScale ? 1 : 0);
4356 Graph graphSurvivalFunction(title, xName, "SurvivalFunction", true, "topleft", 1, scale);
4357 Sample data(size + 2, 2);
4358 data(0, 0) = xMin;
4359 data(0, 1) = computeSurvivalFunction(xMin);
4360 for (UnsignedInteger i = 0; i < size; ++i)
4361 {
4362 const Scalar x = support(i, 0);
4363 data(i + 1, 0) = x;
4364 data(i + 1, 1) = gridY(i, 0);
4365 }
4366 if (support(size - 1, 0) == xMax) data[size + 1] = data[size];
4367 else
4368 {
4369 data(size + 1, 0) = xMax;
4370 data(size + 1, 1) = computeSurvivalFunction(xMax);
4371 }
4372 graphSurvivalFunction.add(Staircase(data, "red", "solid", 2, "s", title));
4373 return graphSurvivalFunction;
4374 }
4375
4376 /* Draw the SurvivalFunction of the distribution when its dimension is 1 */
4377 Graph DistributionImplementation::drawSurvivalFunction(const Scalar xMin,
4378 const Scalar xMax,
4379 const UnsignedInteger pointNumber,
4380 const Bool logScale) const
4381 {
4382 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "Error: can draw a SurvivalFunction only if dimension equals 1, here dimension=" << dimension_;
4383 if (xMax <= xMin) throw InvalidArgumentException(HERE) << "Error: cannot draw a SurvivalFunction with xMax <= xMin, here xmin=" << xMin << " and xmax=" << xMax;
4384 if (pointNumber < 2) throw InvalidArgumentException(HERE) << "Error: cannot draw a SurvivalFunction with a point number < 2";
4385 if (isDiscrete()) return drawDiscreteSurvivalFunction(xMin, xMax, logScale);
4386 const SurvivalFunctionWrapper survivalWrapper(Collection<Implementation>(0), this);
4387 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>(logScale ? 1 : 0);
4388 Graph graphSurvivalFunction(survivalWrapper.draw(xMin, xMax, pointNumber, scale));
4389 Drawable drawable(graphSurvivalFunction.getDrawable(0));
4390 const String title(OSS() << getDescription()[0] << " SurvivalFunction");
4391 drawable.setColor("red");
4392 drawable.setLegend(title);
4393 drawable.setLineStyle("solid");
4394 drawable.setLineWidth(2);
4395 graphSurvivalFunction.setDrawable(drawable, 0);
4396 graphSurvivalFunction.setXTitle(getDescription()[0]);
4397 graphSurvivalFunction.setYTitle("SurvivalFunction");
4398 graphSurvivalFunction.setTitle("");
4399 graphSurvivalFunction.setLegendPosition("topright");
4400 return graphSurvivalFunction;
4401 }
4402
4403 /* Draw the SurvivalFunction of the distribution when its dimension is 1 */
4404 Graph DistributionImplementation::drawSurvivalFunction(const UnsignedInteger pointNumber,
4405 const Bool logScale) const
4406 {
4407 if (dimension_ == 2) return drawSurvivalFunction(Indices(2, pointNumber), logScale, logScale);
4408 if (dimension_ != 1) throw InvalidArgumentException(HERE) << "Error: this method is available only for 1D or 2D distributions";
4409 // For discrete distributions, use the numerical range to define the drawing range
4410 const Scalar xMin = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
4411 const Scalar xMax = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
4412 const Scalar delta = 2.0 * (xMax - xMin) * (1.0 - 0.5 * (ResourceMap::GetAsScalar("Distribution-QMax" ) - ResourceMap::GetAsScalar("Distribution-QMin")));
4413 if (isDiscrete())
4414 {
4415 Scalar a = std::max(xMin - delta, getRange().getLowerBound()[0] - 1.0);
4416 Scalar b = std::min(xMax + delta, getRange().getUpperBound()[0] + 1.0);
4417 if (b <= a)
4418 {
4419 a -= 1.0;
4420 b += 1.0;
4421 }
4422 return drawDiscreteSurvivalFunction(a, b, logScale);
4423 }
4424 return drawSurvivalFunction(xMin - delta, xMax + delta, pointNumber, logScale);
4425 }
4426
4427 /* Draw the SurvivalFunction of a 1D marginal */
4428 Graph DistributionImplementation::drawMarginal1DSurvivalFunction(const UnsignedInteger marginalIndex,
4429 const Scalar xMin,
4430 const Scalar xMax,
4431 const UnsignedInteger pointNumber,
4432 const Bool logScale) const
4433 {
4434 Graph marginalGraph(getMarginal(marginalIndex).drawSurvivalFunction(xMin, xMax, pointNumber, logScale));
4435 marginalGraph.setTitle(OSS() << getDescription() << "->" << description_[marginalIndex] << " component SurvivalFunction");
4436 return marginalGraph;
4437 }
4438
4439 /* Draw the SurvivalFunction of the distribution when its dimension is 2 */
4440 Graph DistributionImplementation::drawSurvivalFunction(const Point & xMin,
4441 const Point & xMax,
4442 const Indices & pointNumber,
4443 const Bool logScaleX,
4444 const Bool logScaleY) const
4445 {
4446 if (dimension_ == 1) return drawSurvivalFunction(xMin[0], xMax[0], pointNumber[0], logScaleX);
4447 if (xMin.getDimension() != 2) throw InvalidArgumentException(HERE) << "Error: expected xMin to be of dimension 2, here dimension=" << xMin.getDimension();
4448 if (xMax.getDimension() != 2) throw InvalidArgumentException(HERE) << "Error: expected xMax to be of dimension 2, here dimension=" << xMax.getDimension();
4449 if (pointNumber.getSize() != 2) throw InvalidArgumentException(HERE) << "Error: expected pointNumber to be of size 2, here size=" << pointNumber.getSize();
4450 if (!(pointNumber[0] >= 2 && pointNumber[1] >= 2)) throw InvalidArgumentException(HERE) << "Error: the discretization must have at least 2 points per component";
4451 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>((logScaleX ? 1 : 0) + (logScaleY ? 2 : 0));
4452 Graph graph(SurvivalFunctionWrapper(Collection<Implementation>(0), this).draw(xMin, xMax, pointNumber, scale));
4453 graph.setXTitle(description_[0]);
4454 graph.setYTitle(description_[1]);
4455 graph.setTitle(String(OSS() << getDescription() << " iso-SurvivalFunction"));
4456 return graph;
4457 }
4458
4459 /* Draw the SurvivalFunction of the distribution when its dimension is 2 */
4460 Graph DistributionImplementation::drawSurvivalFunction(const Point & xMin,
4461 const Point & xMax,
4462 const Bool logScaleX,
4463 const Bool logScaleY) const
4464 {
4465 return drawSurvivalFunction(xMin, xMax, Indices(2, ResourceMap::GetAsUnsignedInteger("Distribution-DefaultPointNumber")), logScaleX, logScaleY);
4466 }
4467
4468 /* Draw the SurvivalFunction of the distribution when its dimension is 2 */
4469 Graph DistributionImplementation::drawSurvivalFunction(const Indices & pointNumber,
4470 const Bool logScaleX,
4471 const Bool logScaleY) const
4472 {
4473 if (pointNumber.getSize() != 2) throw InvalidArgumentException(HERE) << "Error: expected pointNumber to be of size 2, here size=" << pointNumber.getSize();
4474 Point xMin(2);
4475 if (isCopula()) xMin = Point(2, 0.0);
4476 else
4477 {
4478 xMin[0] = getMarginal(0).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
4479 xMin[1] = getMarginal(1).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0];
4480 }
4481 Point xMax(2);
4482 if (isCopula()) xMax = Point(2, 1.0);
4483 else
4484 {
4485 xMax[0] = getMarginal(0).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
4486 xMax[1] = getMarginal(1).computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0];
4487 }
4488 Point delta(2, 0.0);
4489 if (!isCopula()) delta = (2.0 * (xMax - xMin) * (1.0 - 0.5 * (ResourceMap::GetAsScalar("Distribution-QMax" ) - ResourceMap::GetAsScalar("Distribution-QMin"))));
4490 return drawSurvivalFunction(xMin - delta, xMax + delta, pointNumber, logScaleX, logScaleY);
4491 }
4492
4493 /* Draw the SurvivalFunction of a 2D marginal */
4494 Graph DistributionImplementation::drawMarginal2DSurvivalFunction(const UnsignedInteger firstMarginal,
4495 const UnsignedInteger secondMarginal,
4496 const Point & xMin,
4497 const Point & xMax,
4498 const Indices & pointNumber,
4499 const Bool logScaleX,
4500 const Bool logScaleY) const
4501 {
4502 Indices indices = {firstMarginal, secondMarginal};
4503 Graph marginalGraph(getMarginal(indices).drawSurvivalFunction(xMin, xMax, pointNumber, logScaleX, logScaleY));
4504 marginalGraph.setTitle(OSS() << getDescription() << "->[" << description_[firstMarginal] << ", " << description_[secondMarginal] << "] components iso-SurvivalFunction");
4505 return marginalGraph;
4506 }
4507
4508 /* Draw the quantile of the distribution when its dimension is 1 or 2 */
4509 Graph DistributionImplementation::drawQuantile(const UnsignedInteger pointNumber,
4510 const Bool logScale) const
4511 {
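// Stay strictly inside (0, 1): the quantiles at levels 0 and 1 may be infinite for distributions with unbounded support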
4512 const Scalar qMin = SpecFunc::ScalarEpsilon;
4513 const Scalar qMax = 1.0 - qMin;
4514 return drawQuantile(qMin, qMax, pointNumber, logScale);
4515 }
4516
4517 Graph DistributionImplementation::drawQuantile(const Scalar qMin,
4518 const Scalar qMax,
4519 const UnsignedInteger pointNumber,
4520 const Bool logScale) const
4521 {
4522 // Generic interface for the 1D and 2D cases
4523 if (dimension_ == 1) return drawQuantile1D(qMin, qMax, pointNumber, logScale);
4524 if (dimension_ == 2) return drawQuantile2D(qMin, qMax, pointNumber, logScale);
4525 throw InvalidDimensionException(HERE) << "Error: can draw the quantiles only if dimension equals 1 or 2, here dimension=" << dimension_;
4526 }
4527
4528 /* Draw the quantile of the distribution when its dimension is 1 */
4529 Graph DistributionImplementation::drawQuantile1D(const Scalar qMin,
4530 const Scalar qMax,
4531 const UnsignedInteger pointNumber,
4532 const Bool logScale) const
4533 {
4534 const QuantileWrapper quantileWrapper(Collection<Implementation>(0), this);
4535 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>(logScale ? 1 : 0);
4536 Graph graphQuantile(quantileWrapper.draw(qMin, qMax, pointNumber, scale));
4537 Drawable drawable(graphQuantile.getDrawable(0));
4538 const String title(OSS() << getDescription()[0] << " Quantile");
4539 drawable.setColor("red");
4540 drawable.setLegend(title);
4541 drawable.setLineStyle("solid");
4542 drawable.setLineWidth(2);
4543 graphQuantile.setDrawable(drawable, 0);
4544 graphQuantile.setXTitle(getDescription()[0]);
4545 graphQuantile.setYTitle("Quantile");
4546 graphQuantile.setTitle("");
4547 graphQuantile.setLegendPosition("topleft");
4548 return graphQuantile;
4549 }
4550
4551 /* Draw the quantile of the distribution when its dimension is 2 */
4552 Graph DistributionImplementation::drawQuantile2D(const Scalar qMin,
4553 const Scalar qMax,
4554 const UnsignedInteger pointNumber,
4555 const Bool logScaleX,
4556 const Bool logScaleY) const
4557 {
4558 const String title(OSS() << getDescription() << " Quantile");
4559 const Sample data(computeQuantile(qMin, qMax, pointNumber));
4560 Curve curveQuantile(data);
4561 curveQuantile.setColor("red");
4562 curveQuantile.setLegend(title);
4563 curveQuantile.setLineStyle("solid");
4564 curveQuantile.setLineWidth(2);
4565 const String xName(getDescription()[0]);
4566 const String yName(getDescription()[1]);
4567 const GraphImplementation::LogScale scale = static_cast<GraphImplementation::LogScale>((logScaleX ? 1 : 0) + (logScaleY ? 2 : 0));
4568 Graph graphQuantile(title, xName, yName, true, "topleft", 1, scale);
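// Overlay the quantile curve on an iso-SurvivalFunction contour computed over the bounding box of the quantile data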
4569 graphQuantile.add(drawSurvivalFunction(data.getMin(), data.getMax(), logScaleX, logScaleY).getDrawable(0));
4570 graphQuantile.add(curveQuantile);
4571 Description legends(2);
4572 legends[0] = "iso-SurvivalFunction";
4573 legends[1] = "quantile";
4574 graphQuantile.setLegends(legends);
4575 return graphQuantile;
4576 }
4577
4578 /* Parameters value and description accessor */
4579 DistributionImplementation::PointWithDescriptionCollection DistributionImplementation::getParametersCollection() const
4580 {
4581 // Use compact accessor
4582 PointWithDescription parameters(getParameter());
4583 parameters.setDescription(getParameterDescription());
4584 parameters.setName(getDescription()[0]);
4585 return PointWithDescriptionCollection(1, parameters);
4586 }
4587
4588 void DistributionImplementation::setParametersCollection(const PointWithDescriptionCollection & parametersCollection)
4589 {
4590 if (getDimension() == 1)
4591 {
4592 if (parametersCollection.getSize() != 1) throw InvalidArgumentException(HERE) << "Expected collection of size 1, got " << parametersCollection.getSize();
4593 setParameter(parametersCollection[0]);
4594 }
4595
4596 // Get the actual collection of parameters to check the description and the size
4597 const PointWithDescriptionCollection actualParameters(getParametersCollection());
4598 const UnsignedInteger size = actualParameters.getSize();
4599 if (parametersCollection.getSize() != size) throw InvalidArgumentException(HERE) << "Error: the given parameters collection has an invalid size (" << parametersCollection.getSize() << "), it should be " << size;
4600 PointCollection coll(0);
4601 for (UnsignedInteger i = 0; i < size; ++i)
4602 {
4603 const UnsignedInteger dimension = actualParameters[i].getDimension();
4604 if (parametersCollection[i].getDimension() != dimension) throw InvalidArgumentException(HERE) << "Error: the given parameters collection has an invalid dimension at index " << i;
4605 coll.add(parametersCollection[i]);
4606 }
4607 setParametersCollection(coll);
4608 }
4609
4610 void DistributionImplementation::setParametersCollection(const PointCollection & parametersCollection)
4611 {
4612 const UnsignedInteger size = parametersCollection.getSize();
4613 Point newParameters;
4614 for (UnsignedInteger i = 0; i < size; ++ i) newParameters.add(parametersCollection[i]);
4615 setParameter(newParameters);
4616 }
4617
4618 /* Parameters value accessor */
4619 Point DistributionImplementation::getParameter() const
4620 {
4621 throw NotYetImplementedException(HERE) << "In DistributionImplementation::getParameter()";
4622 }
4623
4624 void DistributionImplementation::setParameter(const Point & parameters)
4625 {
4626 if (parameters.getSize() != 0) throw InvalidArgumentException(HERE) << "Error: expected 0 parameters, got " << parameters.getSize();
4627 }
4628
4629 /* Parameters description accessor */
4630 Description DistributionImplementation::getParameterDescription() const
4631 {
4632 throw NotYetImplementedException(HERE) << "In DistributionImplementation::getParameterDescription()";
4633 }
4634
4635
4636 /* Parameters number */
4637 UnsignedInteger DistributionImplementation::getParameterDimension() const
4638 {
4639 return getParameter().getSize();
4640 }
4641
4642 /* Description accessor */
4643 void DistributionImplementation::setDescription(const Description & description)
4644 {
4645 const UnsignedInteger size = description.getSize();
4646 if (size != getDimension()) throw InvalidArgumentException(HERE) << "Error: the description must have the same size as the distribution dimension, here size=" << size << " and dimension=" << getDimension();
4647 // Check if the description is valid
4648 // First, copy the description
4649 Description test(description);
4650 // Second, sort the copy
4651 std::sort(test.begin(), test.end());
4652 // Third, move the duplicates at the end
4653 Description::const_iterator it = std::unique(test.begin(), test.end());
4654 // Fourth, check if there was any duplicate
4655 if (it != test.end())
4656 {
4657 LOGWARN(OSS() << "Warning! The description of the distribution " << getName() << " is " << description << " and does not uniquely identify the marginal distributions. Using the default description instead.");
4658 description_ = Description::BuildDefault(dimension_, "X");
4659 }
4660 else description_ = description;
4661 }
4662
4663 /* Description accessor */
4664 Description DistributionImplementation::getDescription() const
4665 {
4666 return description_;
4667 }
4668
4669 /* Accessor to PDF computation precision */
4670 Scalar DistributionImplementation::getPDFEpsilon() const
4671 {
4672 return pdfEpsilon_;
4673 }
4674
4675 /* Accessor to CDF computation precision */
4676 Scalar DistributionImplementation::getCDFEpsilon() const
4677 {
4678 return cdfEpsilon_;
4679 }
4680
4681 /* Get a position indicator for a 1D distribution */
4682 Scalar DistributionImplementation::getPositionIndicator() const
4683 {
4684 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "Error: cannot get the position indicator of a distribution with dimension > 1";
4685 // First, try to return the mean of the distribution
4686 try
4687 {
4688 return getMean()[0];
4689 }
4690 catch (...)
4691 {
4692 // Second, return the median of the distribution
4693 return computeQuantile(0.5)[0];
4694 }
4695 }
4696
4697 /* Get a dispersion indicator for a 1D distribution */
4698 Scalar DistributionImplementation::getDispersionIndicator() const
4699 {
4700 if (dimension_ != 1) throw InvalidDimensionException(HERE) << "Error: cannot get the dispersion indicator of a distribution with dimension > 1";
4701 // First, try to return the standard deviation of the distribution
4702 try
4703 {
4704 return getStandardDeviation()[0];
4705 }
4706 catch (...)
4707 {
4708 // Second, return the interquartile range of the distribution
4709 return computeQuantile(0.75)[0] - computeQuantile(0.25)[0];
4710 }
4711 }
4712
4713 /* Is it safe to compute PDF/CDF etc in parallel? */
4714 Bool DistributionImplementation::isParallel() const
4715 {
4716 return isParallel_;
4717 }
4718
4719 void DistributionImplementation::setParallel(const Bool flag)
4720 {
4721 isParallel_ = flag;
4722 }
4723
4724 /* Method save() stores the object through the StorageManager */
4725 void DistributionImplementation::save(Advocate & adv) const
4726 {
4727 PersistentObject::save(adv);
4728 adv.saveAttribute( "mean_", mean_ );
4729 adv.saveAttribute( "covariance_", covariance_ );
4730 adv.saveAttribute( "gaussNodes_", gaussNodes_ );
4731 adv.saveAttribute( "gaussWeights_", gaussWeights_ );
4732 adv.saveAttribute( "integrationNodesNumber_", integrationNodesNumber_ );
4733 adv.saveAttribute( "isAlreadyComputedMean_", isAlreadyComputedMean_ );
4734 adv.saveAttribute( "isAlreadyComputedCovariance_", isAlreadyComputedCovariance_ );
4735 adv.saveAttribute( "isAlreadyComputedGaussNodesAndWeights_", isAlreadyComputedGaussNodesAndWeights_ );
4736 adv.saveAttribute( "dimension_", dimension_ );
4737 adv.saveAttribute( "weight_", weight_ );
4738 adv.saveAttribute( "range_", range_ );
4739 adv.saveAttribute( "description_", description_ );
4740 adv.saveAttribute( "isCopula_", isCopula_ );
4741 adv.saveAttribute( "isParallel_", isParallel_ );
4742 }
4743
4744 /* Method load() reloads the object from the StorageManager */
4745 void DistributionImplementation::load(Advocate & adv)
4746 {
4747 PersistentObject::load(adv);
4748 adv.loadAttribute( "mean_", mean_ );
4749 adv.loadAttribute( "covariance_", covariance_ );
4750 adv.loadAttribute( "gaussNodes_", gaussNodes_ );
4751 adv.loadAttribute( "gaussWeights_", gaussWeights_ );
4752 adv.loadAttribute( "integrationNodesNumber_", integrationNodesNumber_ );
4753 adv.loadAttribute( "isAlreadyComputedMean_", isAlreadyComputedMean_ );
4754 adv.loadAttribute( "isAlreadyComputedCovariance_", isAlreadyComputedCovariance_ );
4755 adv.loadAttribute( "isAlreadyComputedGaussNodesAndWeights_", isAlreadyComputedGaussNodesAndWeights_ );
4756 adv.loadAttribute( "dimension_", dimension_ );
4757 adv.loadAttribute( "weight_", weight_ );
4758 adv.loadAttribute( "range_", range_ );
4759 adv.loadAttribute( "description_", description_ );
4760 adv.loadAttribute( "isCopula_", isCopula_ );
4761 if (adv.hasAttribute("isParallel_"))
4762 adv.loadAttribute( "isParallel_", isParallel_ );
4763 }
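
// Persistence sketch (an illustration under the usual Study workflow, not part
// of the original source): save()/load() are driven by a storage manager, e.g.
//
//   // Study study;
//   // study.setStorageManager(XMLStorageManager("distribution.xml"));
//   // study.add("dist", Normal(0.0, 1.0));
//   // study.save();
//
// The hasAttribute() guard in load() keeps files written before isParallel_
// was introduced readable.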

/* Transformation of distributions by usual functions */
Distribution DistributionImplementation::cos() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  const Scalar b = getRange().getUpperBound()[0];
  const SignedInteger nMin = static_cast< SignedInteger > (ceil(a / M_PI));
  const SignedInteger nMax = static_cast< SignedInteger > (floor(b / M_PI));
  Point bounds(1, a);
  Point values(1, std::cos(a));
  for (SignedInteger n = nMin; n <= nMax; ++n)
  {
    bounds.add(n * M_PI);
    values.add(n % 2 == 0 ? 1.0 : -1.0);
  }
  bounds.add(b);
  values.add(std::cos(b));
  return new CompositeDistribution(SymbolicFunction("x", "cos(x)"), clone(), bounds, values);
}
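
// Illustration of the bookkeeping above (a sketch, not part of the original
// source): the local extrema of cos on [a, b] sit at the multiples of pi, so
// for a support [1, 10] one gets nMin = 1, nMax = 3 and the piecewise-monotonic
// description bounds = {1, pi, 2*pi, 3*pi, 10},
// values = {cos(1), -1, 1, -1, cos(10)} handed to CompositeDistribution.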

Distribution DistributionImplementation::sin() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  const Scalar b = getRange().getUpperBound()[0];
  const SignedInteger nMin = static_cast< SignedInteger > (ceil(a / M_PI - 0.5));
  const SignedInteger nMax = static_cast< SignedInteger > (floor(b / M_PI - 0.5));
  Point bounds(1, a);
  Point values(1, std::sin(a));
  for (SignedInteger n = nMin; n <= nMax; ++n)
  {
    bounds.add((n + 0.5) * M_PI);
    values.add(n % 2 == 0 ? 1.0 : -1.0);
  }
  bounds.add(b);
  values.add(std::sin(b));
  return new CompositeDistribution(SymbolicFunction("x", "sin(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::tan() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  const Scalar b = getRange().getUpperBound()[0];
  const SignedInteger nMin = static_cast< SignedInteger > (ceil(a / M_PI - 0.5));
  const SignedInteger nMax = static_cast< SignedInteger > (floor(b / M_PI - 0.5));
  // Compute the lower bound and upper bound of the support of tan(X)
  Scalar sumPDF = 0.0;
  for (SignedInteger n = nMin; n <= nMax; ++n) sumPDF += computePDF((n + 0.5) * M_PI);
  const Scalar bound = std::tan(M_PI_2 - quantileEpsilon_ / sumPDF);
  Point bounds(1, a);
  Point values(1, std::tan(a));
  for (SignedInteger n = nMin; n <= nMax; ++n)
  {
    bounds.add((n + 0.5) * M_PI);
    values.add(bound);
    bounds.add((n + 0.5) * M_PI);
    values.add(-bound);
  }
  bounds.add(b);
  values.add(std::tan(b));
  return new CompositeDistribution(SymbolicFunction("x", "tan(x)"), clone(), bounds, values);
}
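
// Rationale for the truncation above (an interpretation added for clarity, not
// an original comment): tan(X) blows up at each pole (n + 1/2) * pi inside the
// support, so the extreme values handed to CompositeDistribution are cut where
// only about quantileEpsilon_ of probability mass would lie beyond the cut.
// Near a pole p with density f(p), P(|X - p| < eps) ~ f(p) * eps and
// |tan(p -/+ eps)| ~ 1 / eps, so eps = quantileEpsilon_ / sumPDF gives a cut at
// +/- tan(pi/2 - eps) that discards roughly quantileEpsilon_ of mass in total.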

Distribution DistributionImplementation::acos() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  if (a < -1.0) throw InvalidArgumentException(HERE) << "Error: cannot take the arc cos of a random variable that takes values less than -1 with positive probability.";
  const Scalar b = getRange().getUpperBound()[0];
  if (!(b <= 1.0)) throw InvalidArgumentException(HERE) << "Error: cannot take the arc cos of a random variable that takes values greater than 1 with positive probability.";
  const Point bounds = {a, b};
  const Point values = {std::acos(a), std::acos(b)};
  return new CompositeDistribution(SymbolicFunction("x", "acos(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::asin() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  if (a < -1.0) throw InvalidArgumentException(HERE) << "Error: cannot take the arc sin of a random variable that takes values less than -1 with positive probability.";
  const Scalar b = getRange().getUpperBound()[0];
  if (!(b <= 1.0)) throw InvalidArgumentException(HERE) << "Error: cannot take the arc sin of a random variable that takes values greater than 1 with positive probability.";
  const Point bounds = {a, b};
  const Point values = {std::asin(a), std::asin(b)};
  return new CompositeDistribution(SymbolicFunction("x", "asin(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::atan() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  const Scalar b = getRange().getUpperBound()[0];
  const Point bounds = {a, b};
  const Point values = {std::atan(a), std::atan(b)};
  return new CompositeDistribution(SymbolicFunction("x", "atan(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::cosh() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  Point bounds(1, a);
  Point values(1, std::cosh(a));
  const Scalar b = getRange().getUpperBound()[0];
  if ((a < 0.0) && (b > 0.0))
  {
    bounds.add(0.0);
    values.add(1.0);
  }
  bounds.add(b);
  values.add(std::cosh(b));
  return new CompositeDistribution(SymbolicFunction("x", "cosh(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::sinh() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  const Scalar b = getRange().getUpperBound()[0];
  const Point bounds = {a, b};
  const Point values = {std::sinh(a), std::sinh(b)};
  return new CompositeDistribution(SymbolicFunction("x", "sinh(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::tanh() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  const Scalar b = getRange().getUpperBound()[0];
  const Point bounds = {a, b};
  const Point values = {std::tanh(a), std::tanh(b)};
  return new CompositeDistribution(SymbolicFunction("x", "tanh(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::acosh() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  if (!(a >= 1.0)) throw InvalidArgumentException(HERE) << "Error: cannot take the arc cosh of a random variable that takes values less than 1 with positive probability.";
  const Scalar b = getRange().getUpperBound()[0];
  const Point bounds = {a, b};
  const Point values = {SpecFunc::Acosh(a), SpecFunc::Acosh(b)};
  return new CompositeDistribution(SymbolicFunction("x", "acosh(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::asinh() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  const Scalar b = getRange().getUpperBound()[0];
  const Point bounds = {a, b};
  const Point values = {SpecFunc::Asinh(a), SpecFunc::Asinh(b)};
  return new CompositeDistribution(SymbolicFunction("x", "asinh(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::atanh() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  if (a < -1.0) throw InvalidArgumentException(HERE) << "Error: cannot take the arc tanh of a random variable that takes values less than -1 with positive probability.";
  const Scalar b = getRange().getUpperBound()[0];
  if (!(b <= 1.0)) throw InvalidArgumentException(HERE) << "Error: cannot take the arc tanh of a random variable that takes values greater than 1 with positive probability.";
  Point bounds(1, a);
  // F_Y(y)=P(atanh(X)<y)<->P(X<tanh(y))=F_X(tanh(y))
  // y s.t. F_Y(y)=epsilon<->y=atanh(F_X^{-1}(epsilon))
  Point values(1, a == -1.0 ? SpecFunc::Atanh(computeQuantile(quantileEpsilon_)[0]) : SpecFunc::Atanh(a));
  bounds.add(b);
  values.add(b == 1.0 ? SpecFunc::Atanh(computeQuantile(quantileEpsilon_, true)[0]) : SpecFunc::Atanh(b));
  return new CompositeDistribution(SymbolicFunction("x", "atanh(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::exp() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  // Check if we can reuse an existing class
  if (getClassName() == "Normal")
  {
    const Point parameters(getParameter());
    return new LogNormal(parameters[0], parameters[1]);
  }
  if (getClassName() == "Uniform")
  {
    const Point parameters(getParameter());
    return new LogUniform(parameters[0], parameters[1]);
  }
  const Scalar a = getRange().getLowerBound()[0];
  const Scalar b = getRange().getUpperBound()[0];
  const Point bounds = {a, b};
  const Point values = {std::exp(a), std::exp(b)};
  return new CompositeDistribution(SymbolicFunction("x", "exp(x)"), clone(), bounds, values);
}
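
// Illustration of the shortcuts above (a sketch, not part of the original
// source): exp() maps parametric families directly whenever possible,
//
//   // Normal(mu, sigma).exp()  ->  LogNormal(mu, sigma)
//   // Uniform(a, b).exp()      ->  LogUniform(a, b)
//
// and only falls back to a generic CompositeDistribution otherwise; log()
// below applies the inverse mappings.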

Distribution DistributionImplementation::log() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  // Check if we can reuse an existing class
  if (getClassName() == "LogNormal")
  {
    const Point parameters(getParameter());
    if (parameters[2] == 0.0) return new Normal(parameters[0], parameters[1]);
  }
  if (getClassName() == "LogUniform")
  {
    const Point parameters(getParameter());
    return new Uniform(parameters[0], parameters[1]);
  }
  const Scalar a = getRange().getLowerBound()[0];
  if (!(a >= 0.0)) throw NotDefinedException(HERE) << "Error: cannot take the logarithm of a random variable that takes negative values with positive probability.";
  const Scalar b = getRange().getUpperBound()[0];
  const Point bounds = {a, b};
  Point values(1, (a == 0.0 ? std::log(computeQuantile(quantileEpsilon_)[0]) : std::log(a)));
  values.add(std::log(b));
  return new CompositeDistribution(SymbolicFunction("x", "log(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::ln() const
{
  return log();
}

Distribution DistributionImplementation::pow(const Scalar exponent) const
{
  LOGDEBUG(OSS() << "Scalar exponent=" << exponent);
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  // First, the case where the exponent is integer
  if (trunc(exponent) == exponent) return pow(static_cast< SignedInteger >(trunc(exponent)));
  if (exponent == 0.5) return (*this).sqrt();
  if (exponent == 1.0 / 3.0) return (*this).cbrt();
  const Scalar a = getRange().getLowerBound()[0];
  if (!(a >= 0.0)) throw NotDefinedException(HERE) << "Error: cannot take a fractional power of a random variable that takes negative values with positive probability.";
  SymbolicFunction toPower("x", String(OSS() << (exponent < 0.0 ? "x^(" : "x^") << exponent << (exponent < 0.0 ? ")" : "")));
  Point bounds(1, a);
  Point values(1, (a == 0.0 ? (exponent < 0.0 ? std::pow(computeQuantile(quantileEpsilon_)[0], exponent) : 0.0) : std::pow(a, exponent)));
  const Scalar b = getRange().getUpperBound()[0];
  bounds.add(b);
  values.add(std::pow(b, exponent));
  return new CompositeDistribution(toPower, clone(), bounds, values);
}

Distribution DistributionImplementation::pow(const SignedInteger exponent) const
{
  LOGDEBUG(OSS() << "Signed integer exponent=" << exponent);
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  if (exponent == 0) return new Dirac(Point(1, 1.0));
  if (exponent == 1) return *this;
  if (exponent == 2) return (*this).sqr();
  if (exponent == -1) return (*this).inverse();
  const Scalar a = getRange().getLowerBound()[0];
  SymbolicFunction toPower("x", String(OSS() << (exponent < 0.0 ? "x^(" : "x^") << exponent << (exponent < 0.0 ? ")" : "")));
  // Easy case: a >= 0
  if (a >= 0.0)
  {
    Point bounds(1, a);
    Point values(1, (a == 0.0 ? (exponent < 0.0 ? std::pow(computeQuantile(quantileEpsilon_)[0], 1.0 * exponent) : 0.0) : std::pow(a, 1.0 * exponent)));
    const Scalar b = getRange().getUpperBound()[0];
    bounds.add(b);
    values.add(std::pow(b, 1.0 * exponent));
    LOGDEBUG(OSS() << "a=" << a << ", toPower=" << toPower << ", bounds=" << bounds << ", values=" << values);
    return new CompositeDistribution(toPower, clone(), bounds, values);
  }
  // Easy case: b <= 0
  Point bounds(1, a);
  Point values(1, std::pow(a, 1.0 * exponent));
  const Scalar b = getRange().getUpperBound()[0];
  if (b <= 0.0)
  {
    bounds.add(b);
    values.add(b == 0.0 ? (exponent < 0.0 ? std::pow(computeQuantile(quantileEpsilon_, true)[0], 1.0 * exponent) : 0.0) : std::pow(b, 1.0 * exponent));
    LOGDEBUG(OSS() << "b=" << b << ", toPower=" << toPower << ", bounds=" << bounds << ", values=" << values);
    return new CompositeDistribution(toPower, clone(), bounds, values);
  }
  // Difficult case: a < 0 < b
  // For odd exponents, the function is bijective
  if (exponent % 2 != 0)
  {
    // No singularity at 0 for positive exponent
    if (exponent > 0)
    {
      bounds.add(b);
      values.add(std::pow(b, 1.0 * exponent));
      LOGDEBUG(OSS() << "odd exponent=" << exponent << ", toPower=" << toPower << ", bounds=" << bounds << ", values=" << values);
      return new CompositeDistribution(toPower, clone(), bounds, values);
    }
    // A singularity at 0 for negative exponent
    bounds.add(0.0);
    values.add(SpecFunc::LowestScalar);
    bounds.add(0.0);
    values.add(SpecFunc::MaxScalar);
    bounds.add(b);
    values.add(std::pow(b, 1.0 * exponent));
    LOGDEBUG(OSS() << "odd exponent=" << exponent << ", toPower=" << toPower << ", bounds=" << bounds << ", values=" << values);
    return new CompositeDistribution(toPower, clone(), bounds, values);
  }
  // For even exponent, the behaviour changes at 0
  bounds.add(0.0);
  values.add(exponent > 0 ? 0.0 : SpecFunc::MaxScalar);
  bounds.add(b);
  values.add(std::pow(b, 1.0 * exponent));
  LOGDEBUG(OSS() << "even exponent=" << exponent << ", toPower=" << toPower << ", bounds=" << bounds << ", values=" << values);
  return new CompositeDistribution(toPower, clone(), bounds, values);
}
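
// Worked example for the mixed-sign branch above (illustration only): with a
// support [-1, 2] and exponent 2 (even, positive) the bookkeeping yields
// bounds = {-1, 0, 2} and values = {1, 0, 4}: x -> x^2 decreases from 1 to 0
// on [-1, 0] and increases from 0 to 4 on [0, 2], which is exactly the
// piecewise-monotonic description CompositeDistribution expects.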

Distribution DistributionImplementation::sqr() const
{
  // Check if we can reuse an existing class
  if (getClassName() == "Chi")
  {
    const Point parameters(getParameter());
    return new ChiSquare(parameters[0]);
  }
  if (getClassName() == "Normal")
  {
    const Point parameters(getParameter());
    return new SquaredNormal(parameters[0], parameters[1]);
  }
  return new CompositeDistribution(SymbolicFunction("x", "x^2"), clone());
}

Distribution DistributionImplementation::inverse() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  Point bounds(1, a);
  const SymbolicFunction inverseFunction("x", "1.0 / x");
  // Easy case: a >= 0
  if (a >= 0.0)
  {
    Point values(1, (a == 0.0 ? 1.0 / computeQuantile(quantileEpsilon_)[0] : 1.0 / a));
    const Scalar b = getRange().getUpperBound()[0];
    bounds.add(b);
    if (getRange().getFiniteUpperBound()[0])
      values.add(1.0 / b);
    else
      values.add(0.0);
    return new CompositeDistribution(inverseFunction, clone(), bounds, values);
  }
  // Here, a < 0
  Point values(1);
  if (getRange().getFiniteLowerBound()[0])
    values[0] = 1.0 / a;
  else
    values[0] = 0.0;
  const Scalar b = getRange().getUpperBound()[0];
  // Easy case: b <= 0
  if (b <= 0.0)
  {
    bounds.add(b);
    values.add(b == 0.0 ? 1.0 / computeQuantile(quantileEpsilon_, true)[0] : 1.0 / b);
    return new CompositeDistribution(inverseFunction, clone(), bounds, values);
  }
  // Difficult case: a < 0 < b
  // A singularity at 0
  bounds.add(0.0);
  // The CDF of Y=1/X is
  // F_Y(y)=[F_X(0) - F_X(1 / y)]1_{y < 0} +
  //        [F_X(0) + 1 - F_X(1 / y)]1_{y > 0} +
  //        F_X(0)1_{y = 0}
  // so the bounds for Y are obtained when X->0^- and X->0^+
  values.add(1.0 / computeQuantile(computeCDF(0.0) - quantileEpsilon_)[0]);
  bounds.add(0.0);
  values.add(1.0 / computeQuantile(computeCDF(0.0) + quantileEpsilon_)[0]);
  bounds.add(b);
  if (getRange().getFiniteUpperBound()[0])
    values.add(1.0 / b);
  else
    values.add(0.0);
  return new CompositeDistribution(inverseFunction, clone(), bounds, values);
}
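
// Worked example for the singular branch above (illustration only): with a
// support [-1, 2] the singularity at 0 splits Y = 1/X into two monotonic
// pieces, bounds = {-1, 0, 0, 2}; the values start at 1/(-1) = -1, then take
// two very large values of opposite signs computed from quantiles just below
// and just above F_X(0), and end at 1/2, so Y lives on (-inf, -1] and [1/2, +inf).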

Distribution DistributionImplementation::sqrt() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  // Check if we can reuse an existing class
  if (getClassName() == "ChiSquare")
  {
    const Point parameters(getParameter());
    return new Chi(parameters[0]);
  }
  const Scalar a = getRange().getLowerBound()[0];
  if (!(a >= 0.0)) throw NotDefinedException(HERE) << "Error: cannot take the square root of a random variable that takes negative values with positive probability.";
  const Scalar b = getRange().getUpperBound()[0];
  const Point bounds = {a, b};
  const Point values = {std::sqrt(a), std::sqrt(b)};
  return new CompositeDistribution(SymbolicFunction("x", "sqrt(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::cbrt() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  const Scalar a = getRange().getLowerBound()[0];
  const Scalar b = getRange().getUpperBound()[0];
  const Point bounds = {a, b};
  const Point values = {SpecFunc::Cbrt(a), SpecFunc::Cbrt(b)};
  return new CompositeDistribution(SymbolicFunction("x", "cbrt(x)"), clone(), bounds, values);
}

Distribution DistributionImplementation::abs() const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: the distribution must be univariate.";
  // First the easy cases
  const Scalar a = getRange().getLowerBound()[0];
  if (a >= 0.0) return *this;
  const Scalar b = getRange().getUpperBound()[0];
  if (b <= 0.0) return (*this) * (-1.0);
  // Now the difficult case
  const Point bounds = {a, 0.0, b};
  const Point values = {std::abs(a), 0.0, b};
  return new CompositeDistribution(SymbolicFunction("x", "abs(x)"), clone(), bounds, values);
}
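
// Worked example for the mixed-sign case above (illustration only): with a
// support [-1, 2] the pieces are bounds = {-1, 0, 2} and values = {1, 0, 2},
// since |x| decreases from 1 to 0 on [-1, 0] and increases from 0 to 2 on [0, 2].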

/* Quantile epsilon accessor */
Scalar DistributionImplementation::getQuantileEpsilon() const
{
  return quantileEpsilon_;
}

void DistributionImplementation::setQuantileEpsilon(const Scalar quantileEpsilon)
{
  quantileEpsilon_ = quantileEpsilon;
}

END_NAMESPACE_OPENTURNS