//                                               -*- C++ -*-
/**
 *  @brief The maximum entropy order statistics distribution
 *
 *  Copyright 2005-2021 Airbus-EDF-IMACS-ONERA-Phimeca
 *
 *  This library is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with this library.  If not, see <http://www.gnu.org/licenses/>.
 *
 */
#include <cmath>
#include <algorithm>
#include "openturns/MaximumEntropyOrderStatisticsDistribution.hxx"
#include "openturns/MaximumEntropyOrderStatisticsCopula.hxx"
#include "openturns/RandomGenerator.hxx"
#include "openturns/SpecFunc.hxx"
#include "openturns/PersistentObjectFactory.hxx"
#include "openturns/OrderStatisticsMarginalChecker.hxx"
#include "openturns/Uniform.hxx"
#include "openturns/GaussKronrod.hxx"
#include "openturns/Brent.hxx"
#include "openturns/MethodBoundEvaluation.hxx"

BEGIN_NAMESPACE_OPENTURNS

CLASSNAMEINIT(MaximumEntropyOrderStatisticsDistribution)

static const Factory<MaximumEntropyOrderStatisticsDistribution> Factory_MaximumEntropyOrderStatisticsDistribution;


/* Default constructor */
MaximumEntropyOrderStatisticsDistribution::MaximumEntropyOrderStatisticsDistribution()
  : ContinuousDistribution()
{
  setName("MaximumEntropyOrderStatisticsDistribution");
  DistributionCollection coll(2);
  coll[0] = Uniform(-1.0, 0.5);
  coll[1] = Uniform(-0.5, 1.0);
  integrator_ = GaussKronrod(ResourceMap::GetAsUnsignedInteger("MaximumEntropyOrderStatisticsDistribution-ExponentialFactorDiscretization"), ResourceMap::GetAsScalar("GaussKronrod-MaximumError"), GaussKronrodRule(GaussKronrodRule::G7K15));
  // This call also sets the range. Use the approximation but don't check the marginals.
  setDistributionCollection(coll, true, false);
  setIntegrationNodesNumber(ResourceMap::GetAsUnsignedInteger("MaximumEntropyOrderStatisticsDistribution-CDFIntegrationNodesNumber"));
  // To ensure that the nodes will already be computed when calling computeCDF() in parallel
  Point weights;
  Point nodes(getGaussNodesAndWeights(weights));
}
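
/* Usage sketch (illustration only, not part of the library):
 *   MaximumEntropyOrderStatisticsDistribution distribution; // Uniform(-1.0, 0.5) and Uniform(-0.5, 1.0) marginals
 *   const Point x(distribution.getRealization());           // the components are ordered: x[0] <= x[1]
 */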

/* Parameters constructor */
MaximumEntropyOrderStatisticsDistribution::MaximumEntropyOrderStatisticsDistribution(const DistributionCollection & coll,
    const Bool useApprox,
    const Bool checkMarginals)
  : ContinuousDistribution()
  , distributionCollection_(coll)
{
  setName("MaximumEntropyOrderStatisticsDistribution");
  integrator_ = GaussKronrod(ResourceMap::GetAsUnsignedInteger("MaximumEntropyOrderStatisticsDistribution-ExponentialFactorDiscretization"), ResourceMap::GetAsScalar("GaussKronrod-MaximumError"), GaussKronrodRule(GaussKronrodRule::G7K15));
  // This call also sets the range.
  setDistributionCollection(coll, useApprox, checkMarginals);
  setIntegrationNodesNumber(ResourceMap::GetAsUnsignedInteger("MaximumEntropyOrderStatisticsDistribution-CDFIntegrationNodesNumber"));
  // To ensure that the nodes will already be computed when calling computeCDF() in parallel
  Point weights;
  Point nodes(getGaussNodesAndWeights(weights));
}

/* Parameters constructor */
MaximumEntropyOrderStatisticsDistribution::MaximumEntropyOrderStatisticsDistribution(const DistributionCollection & coll,
    const Indices & partition,
    const Bool useApprox,
    const Collection<PiecewiseHermiteEvaluation> & exponentialFactorApproximation,
    const Description & description)
  : ContinuousDistribution()
  , distributionCollection_(coll)
  , partition_(partition)
  , useApproximation_(useApprox)
  , exponentialFactorApproximation_(exponentialFactorApproximation)
  , integrator_(GaussKronrod(ResourceMap::GetAsUnsignedInteger("MaximumEntropyOrderStatisticsDistribution-ExponentialFactorDiscretization"), ResourceMap::GetAsScalar("GaussKronrod-MaximumError"), GaussKronrodRule(GaussKronrodRule::G7K15)))
{
  isParallel_ = false;
  // Initialize the distribution manually in order to avoid costly checks that are not needed here
  const UnsignedInteger size = coll.getSize();
  setDimension(size);
  computeRange();
  setDescription(description);
  setIntegrationNodesNumber(ResourceMap::GetAsUnsignedInteger("MaximumEntropyOrderStatisticsDistribution-CDFIntegrationNodesNumber"));
  // To ensure that the nodes will already be computed when calling computeCDF() in parallel
  Point weights;
  Point nodes(getGaussNodesAndWeights(weights));
}

/* Comparison operator */
Bool MaximumEntropyOrderStatisticsDistribution::operator ==(const MaximumEntropyOrderStatisticsDistribution & other) const
{
  if (this == &other) return true;
  return (distributionCollection_ == other.distributionCollection_);
}

Bool MaximumEntropyOrderStatisticsDistribution::equals(const DistributionImplementation & other) const
{
  const MaximumEntropyOrderStatisticsDistribution* p_other = dynamic_cast<const MaximumEntropyOrderStatisticsDistribution*>(&other);
  return p_other && (*this == *p_other);
}

/* String converter */
String MaximumEntropyOrderStatisticsDistribution::__repr__() const
{
  OSS oss(true);
  oss << "class=" << MaximumEntropyOrderStatisticsDistribution::GetClassName()
      << " name=" << getName()
      << " dimension=" << getDimension()
      << " collection=" << distributionCollection_;
  return oss;
}

String MaximumEntropyOrderStatisticsDistribution::__str__(const String & ) const
{
  OSS oss(false);
  oss << getClassName() << "(collection = " << distributionCollection_ << ")";
  return oss;
}

/* Virtual constructor */
MaximumEntropyOrderStatisticsDistribution * MaximumEntropyOrderStatisticsDistribution::clone() const
{
  return new MaximumEntropyOrderStatisticsDistribution(*this);
}

/* Compute the numerical range of the distribution given the parameters values */
void MaximumEntropyOrderStatisticsDistribution::computeRange()
{
  const UnsignedInteger dimension = getDimension();
  Point lowerBound(dimension);
  Point upperBound(dimension);
  Interval::BoolCollection finiteLowerBound(dimension);
  Interval::BoolCollection finiteUpperBound(dimension);
  for (UnsignedInteger i = 0; i < dimension; ++i)
  {
    const Interval atomRange(distributionCollection_[i].getRange());
    lowerBound[i] = atomRange.getLowerBound()[0];
    upperBound[i] = atomRange.getUpperBound()[0];
    finiteLowerBound[i] = atomRange.getFiniteLowerBound()[0];
    finiteUpperBound[i] = atomRange.getFiniteUpperBound()[0];
  }
  setRange(Interval(lowerBound, upperBound, finiteLowerBound, finiteUpperBound));
}

struct MaximumEntropyOrderStatisticsDistributionWrapper
{
  MaximumEntropyOrderStatisticsDistributionWrapper(const MaximumEntropyOrderStatisticsDistribution & distribution,
      const UnsignedInteger lower,
      const UnsignedInteger upper,
      const Scalar lowerBound)
    : distribution_(distribution)
    , lower_(lower)
    , upper_(upper)
    , lowerBound_(lowerBound)
  {
    // Nothing to do
  }

  Point computePhi(const Point & point) const
  {
    const Scalar pdfUpper = distribution_.distributionCollection_[upper_].computePDF(point);
    Scalar value = 0.0;
    if (pdfUpper > 0.0)
    {
      // First, try to use the complementary CDF
      Scalar a = distribution_.distributionCollection_[lower_].computeComplementaryCDF(point);
      Scalar b = -1.0;
      // If the smallest complementary CDF is less than 1/2, it is better to use the complementary CDF
      if (a < 0.5) b = distribution_.distributionCollection_[upper_].computeComplementaryCDF(point);
      // Else use the CDF
      else
      {
        a = distribution_.distributionCollection_[upper_].computeCDF(point);
        b = distribution_.distributionCollection_[lower_].computeCDF(point);
      }
      if (b > a) value = pdfUpper / (b - a);
    }
    return Point(1, value);
  }

  Point computePartialFactor(const Point & point) const
  {
    return Point(1, distribution_.computeFactor(upper_, lowerBound_, point[0]));
  }

  Point computePartialExponentialFactor(const Point & point) const
  {
    return Point(1, distribution_.computeExponentialFactor(upper_, lowerBound_, point[0]));
  }

  const MaximumEntropyOrderStatisticsDistribution & distribution_;
  const UnsignedInteger lower_;
  const UnsignedInteger upper_;
  const Scalar lowerBound_;
}; // struct MaximumEntropyOrderStatisticsDistributionWrapper
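
/* Note (sketch, inferred from the code above): computePhi() evaluates
 * phi(t) = f_upper(t) / (F_lower(t) - F_upper(t)),
 * computed either from the CDFs or from the complementary CDFs, whichever is better
 * conditioned. The two partial factor methods expose \int_{lowerBound_}^{x} phi(s) ds and
 * exp(-\int_{lowerBound_}^{x} phi(s) ds), the building blocks used by the PDF, CDF,
 * interpolation and conditional quantile computations below.
 */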

/* Compute the exponential factor */
Scalar MaximumEntropyOrderStatisticsDistribution::computeExponentialFactor(const UnsignedInteger k,
    const Scalar x,
    const Scalar y) const
{
  if (y < x)
  {
    const Scalar value = computeExponentialFactor(k, y, x);
    if (value == 0.0) return SpecFunc::MaxScalar;
    return 1.0 / value;
  }
  // Generic part, no approximation here
  if (x == y) return 1.0;
  const Scalar a = distributionCollection_[k].getRange().getLowerBound()[0];
  if (y <= a) return 1.0;
  const Scalar b = distributionCollection_[k].getRange().getUpperBound()[0];
  if (y >= b) return 0.0;
  const Scalar beta = distributionCollection_[k - 1].getRange().getUpperBound()[0];
  if (x >= beta) return distributionCollection_[k].computeComplementaryCDF(y) / distributionCollection_[k].computeComplementaryCDF(x);
  // Here the computation depends on the use of approximation
  if (!useApproximation_)
  {
    const Scalar factor = computeFactor(k, x, y);
    return std::exp(-factor);
  }
  // Here we know that x < y, y > a, y < b, x < beta
  if (x <= a)
  {
    // x <= a, y > a, y <= beta
    if (y <= beta) return exponentialFactorApproximation_[k - 1](Point(1, y))[0];
    // x <= a, y > beta, y < b
    const Scalar ccdfY = distributionCollection_[k].computeComplementaryCDF(y);
    const Scalar ccdfBeta = distributionCollection_[k].computeComplementaryCDF(beta);
    const Scalar rho = ccdfY / ccdfBeta;
    return exponentialFactorApproximation_[k - 1](Point(1, beta))[0] * rho;
  }
  // x > a, x < beta
  // y <= beta
  if (y <= beta) return exponentialFactorApproximation_[k - 1](Point(1, y))[0] / exponentialFactorApproximation_[k - 1](Point(1, x))[0];
  // x > a, y > beta, y < b
  const Scalar ccdfY = distributionCollection_[k].computeComplementaryCDF(y);
  const Scalar ccdfBeta = distributionCollection_[k].computeComplementaryCDF(beta);
  const Scalar rho = ccdfY / ccdfBeta;
  return exponentialFactorApproximation_[k - 1](Point(1, beta))[0] / exponentialFactorApproximation_[k - 1](Point(1, x))[0] * rho;
}
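
/* Note (sketch): computeFactor() and computeExponentialFactor() delegate to each other through
 * the useApproximation_ flag: with the approximation enabled the factor is recovered as
 * -log(exponential factor), and without it the exponential factor is exp(-factor), the factor
 * being obtained by adaptive Gauss-Kronrod integration of phi. This is why the approximation
 * must be built with useApproximation_ == false (see setDistributionCollection()).
 */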

/* Compute the factor */
Scalar MaximumEntropyOrderStatisticsDistribution::computeFactor(const UnsignedInteger k,
    const Scalar x,
    const Scalar y) const
{
  if (y < x) return -computeFactor(k, y, x);
  // Generic part, no approximation here
  if (x == y) return 0.0;
  const Scalar a = distributionCollection_[k].getRange().getLowerBound()[0];
  if (y <= a) return 0.0;
  const Scalar b = distributionCollection_[k].getRange().getUpperBound()[0];
  if (y >= b) return SpecFunc::MaxScalar;
  const Scalar beta = distributionCollection_[k - 1].getRange().getUpperBound()[0];
  if (x >= beta)
  {
    const Scalar value = std::log(distributionCollection_[k].computeComplementaryCDF(y) / distributionCollection_[k].computeComplementaryCDF(x));
    return value;
  }
  if (useApproximation_)
  {
    const Scalar exponentialFactor = computeExponentialFactor(k, x, y);
    if (exponentialFactor == 0.0) return SpecFunc::MaxScalar;
    return -std::log(exponentialFactor);
  }
  const MaximumEntropyOrderStatisticsDistributionWrapper phiKWrapper(*this, k - 1, k, a);
  const Function fPhiK(bindMethod<MaximumEntropyOrderStatisticsDistributionWrapper, Point, Point>(phiKWrapper, &MaximumEntropyOrderStatisticsDistributionWrapper::computePhi, 1, 1));
  Scalar error = -1.0;
  // Here we know that x < y, y > a, y < b, x < beta
  if (x <= a)
  {
    // x <= a, y > a, y <= beta
    if (y <= beta)
    {
      const Scalar value = integrator_.integrate(fPhiK, Interval(a, y), error)[0];
      return value;
    }
    // x <= a, y > beta, y < b
    const Scalar ccdfY = distributionCollection_[k].computeComplementaryCDF(y);
    const Scalar ccdfBeta = distributionCollection_[k].computeComplementaryCDF(beta);
    const Scalar rho = ccdfY / ccdfBeta;
    const Scalar value = integrator_.integrate(fPhiK, Interval(a, beta), error)[0] - std::log(rho);
    return value;
  }
  // x > a, x < beta
  // x > a, y <= beta
  if (y <= beta)
  {
    const Scalar value = integrator_.integrate(fPhiK, Interval(x, y), error)[0];
    return value;
  }
  // x > a, y > beta, y < b
  const Scalar ccdfY = distributionCollection_[k].computeComplementaryCDF(y);
  const Scalar ccdfBeta = distributionCollection_[k].computeComplementaryCDF(beta);
  const Scalar rho = ccdfY / ccdfBeta;
  const Scalar value = integrator_.integrate(fPhiK, Interval(x, beta), error)[0] - std::log(rho);
  return value;
}

/* Get one realization of the distribution */
Point MaximumEntropyOrderStatisticsDistribution::getRealization() const
{
  const UnsignedInteger dimension = getDimension();

  Point x(1);
  x[0] = distributionCollection_[0].getRealization()[0];
  for (UnsignedInteger k = 1; k < dimension; ++k)
  {
    const Scalar xK = computeConditionalQuantile(RandomGenerator::Generate(), x);
    x.add(xK);
  }
  return x;
}
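
/* Note (sketch): sampling exploits the Markov structure of the order statistics: X_0 is drawn
 * from its marginal, then each X_k is drawn conditionally on the previous component by inverting
 * the conditional CDF at a uniform random number (sequential inverse-CDF sampling).
 */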

/* Build a C1 interpolation of the exponential factor between the two given marginals */
PiecewiseHermiteEvaluation MaximumEntropyOrderStatisticsDistribution::interpolateExponentialFactor(const UnsignedInteger lower,
    const UnsignedInteger upper,
    const UnsignedInteger maximumSubdivision,
    const Scalar shift) const
{
  if (lower >= upper) throw InvalidArgumentException(HERE) << "Error: expected lower=" << lower << " to be less than upper=" << upper;
  const Scalar xMin = distributionCollection_[upper].getRange().getLowerBound()[0];
  const Scalar xMax = distributionCollection_[lower].getRange().getUpperBound()[0];
  const MaximumEntropyOrderStatisticsDistributionWrapper phiWrapper(*this, lower, upper, xMin);
  const Function phi(bindMethod<MaximumEntropyOrderStatisticsDistributionWrapper, Point, Point>(phiWrapper, &MaximumEntropyOrderStatisticsDistributionWrapper::computePartialExponentialFactor, 1, 1));
  Point lowerBounds;
  Point upperBounds;
  Sample contributions;
  Point localErrors;
  Scalar error = -1.0;
  // We integrate the exponential factor with a low-order rule (G1K3) so that the adaptive
  // subdivision detects all the singularities; the resulting subinterval bounds drive the
  // interpolation grid
  const Point tmp(GaussKronrod(ResourceMap::GetAsUnsignedInteger("MaximumEntropyOrderStatisticsDistribution-ExponentialFactorDiscretization"), ResourceMap::GetAsScalar("GaussKronrod-MaximumError"), GaussKronrodRule(GaussKronrodRule::G1K3)).integrate(phi, xMin, xMax, error, lowerBounds, upperBounds, contributions, localErrors));
  // Now, we have to sort the intervals in order to build the approximation
  std::sort(upperBounds.begin(), upperBounds.end());
  // Here we subdivide the intervals to compensate for the poorer approximation quality of the Hermite polynomials
  Scalar a = std::abs(xMin) < shift ? shift : xMin + shift * std::abs(xMin);
  Point locations(1, a);
  for (UnsignedInteger i = 0; i < upperBounds.getSize(); ++i)
  {
    const Scalar b = upperBounds[i];
    const Scalar step = (b - a) / maximumSubdivision;
    for (UnsignedInteger j = 1; j <= maximumSubdivision; ++j) locations.add(a + j * step);
    a = b;
  }
  const UnsignedInteger size = locations.getSize();
  Point values(size);
  Point derivatives(size);
  for (UnsignedInteger i = 0; i < size; ++i)
  {
    const Point x(1, locations[i]);
    const Scalar exponentialScalar = phiWrapper.computePartialExponentialFactor(x)[0];
    values[i] = exponentialScalar;
    // d/dx exp(-\int phi) = -phi(x) exp(-\int phi), which gives the Hermite derivative data
    derivatives[i] = -phiWrapper.computePhi(x)[0] * exponentialScalar;
  }
  return PiecewiseHermiteEvaluation(locations, values, derivatives);
}

/* Build a C1 interpolation of the exponential factors in the PDF */
void MaximumEntropyOrderStatisticsDistribution::interpolateExponentialFactors()
{
  // Use exact values to build the approximation
  useApproximation_ = false;
  const UnsignedInteger dimension = getDimension();
  exponentialFactorApproximation_ = Collection<PiecewiseHermiteEvaluation>(dimension - 1);
  const UnsignedInteger maximumSubdivision = ResourceMap::GetAsUnsignedInteger("MaximumEntropyOrderStatisticsDistribution-MaximumApproximationSubdivision");
  const Scalar shift = ResourceMap::GetAsScalar("MaximumEntropyOrderStatisticsDistribution-SupportShift");
  for (UnsignedInteger k = 1; k < dimension; ++k)
  {
    if (!partition_.contains(k - 1))
      exponentialFactorApproximation_[k - 1] = interpolateExponentialFactor(k - 1, k, maximumSubdivision, shift);
  } // k
  // Force parallelism here
  isParallel_ = true;
  useApproximation_ = true;
}

/* Get the kth approximation */
PiecewiseHermiteEvaluation MaximumEntropyOrderStatisticsDistribution::getApproximation(const UnsignedInteger k) const
{
  if (k >= exponentialFactorApproximation_.getSize()) throw InvalidArgumentException(HERE) << "Error: the index=" << k << " must be less than " << exponentialFactorApproximation_.getSize();
  return exponentialFactorApproximation_[k];
}

/* Get the PDF of the distribution */
Scalar MaximumEntropyOrderStatisticsDistribution::computePDF(const Point & point) const
{
  const UnsignedInteger dimension = getDimension();

  if (point.getDimension() != dimension) throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << dimension << ", here dimension=" << point.getDimension();

  // Early exit if the point is not in the support
  for (UnsignedInteger k = 1; k < dimension; ++k)
    if (point[k - 1] > point[k]) return 0.0;
  if (!getRange().numericallyContains(point)) return 0.0;

  // Early exit for the independent case
  if (hasIndependentCopula())
  {
    Scalar pdfValue = distributionCollection_[0].computePDF(point[0]);
    for (UnsignedInteger k = 1; k < dimension; ++k) pdfValue *= distributionCollection_[k].computePDF(point[k]);
    return pdfValue;
  }

  // Here we have to compute something
  Scalar productPDF = distributionCollection_[0].computePDF(point[0]);
  for (UnsignedInteger k = 1; k < dimension; ++k)
  {
    if (!partition_.contains(k - 1))
    {
      // Compute the lower bound of the integral. The integrand is zero outside of the range of the kth distribution
      const Scalar xMin = std::max(point[k - 1], distributionCollection_[k].getRange().getLowerBound()[0]);
      // Compute the upper bound of the integral. The integral has a closed form outside of the range of the (k-1)th distribution, but we still have to compute the integral on the intersection with this range
      const Scalar xK = point[k];
      const Scalar bKm1 = distributionCollection_[k - 1].getRange().getUpperBound()[0];
      Scalar xMax = 0.0;
      Scalar cdfKm1 = 0.0;
      if (bKm1 < xK)
      {
        xMax = bKm1;
        cdfKm1 = 1.0;
      }
      else
      {
        xMax = xK;
        cdfKm1 = distributionCollection_[k - 1].computeCDF(xMax);
      }
      const Scalar cdfK = distributionCollection_[k].computeCDF(xMax);
      const Scalar pdfK = distributionCollection_[k].computePDF(point[k]);
      const Scalar exponentialFactor = computeExponentialFactor(k, xMin, xMax);
      productPDF *= pdfK * exponentialFactor / (cdfKm1 - cdfK);
    } // Partition
  } // Loop over k
  return productPDF;
}
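
/* Note (sketch, mirroring computeLogPDF below): outside the independent blocks the density is
 * accumulated term by term as
 *   f(x) = f_0(x_0) * prod_k f_k(x_k) * exp(-\int_{xMin}^{xMax} phi_k(s) ds) / (F_{k-1}(xMax) - F_k(xMax))
 * with phi_k as computed by the wrapper above.
 */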

/* Get the log PDF of the distribution */
Scalar MaximumEntropyOrderStatisticsDistribution::computeLogPDF(const Point & point) const
{
  const UnsignedInteger dimension = getDimension();

  if (point.getDimension() != dimension) throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << dimension << ", here dimension=" << point.getDimension();

  // Early exit if the point is not in the support
  for (UnsignedInteger k = 1; k < dimension; ++k)
    if (point[k - 1] > point[k]) return SpecFunc::LowestScalar;
  if (!getRange().numericallyContains(point)) return SpecFunc::LowestScalar;

  // Early exit for the independent case
  if (hasIndependentCopula())
  {
    Scalar logPDFValue = distributionCollection_[0].computeLogPDF(point[0]);
    for (UnsignedInteger k = 1; k < dimension; ++k) logPDFValue += distributionCollection_[k].computeLogPDF(point[k]);
    return logPDFValue;
  }

  // Here we have to compute something
  Scalar sumLogPDF = distributionCollection_[0].computeLogPDF(point[0]);
  for (UnsignedInteger k = 1; k < dimension; ++k)
  {
    if (!partition_.contains(k - 1))
    {
      // Compute the lower bound of the integral. The integrand is zero outside of the range of the kth distribution
      const Scalar xMin = std::max(point[k - 1], distributionCollection_[k].getRange().getLowerBound()[0]);
      // Compute the upper bound of the integral. The integral has a closed form outside of the range of the (k-1)th distribution, but we still have to compute the integral on the intersection with this range
      const Scalar xK = point[k];
      const Scalar bKm1 = distributionCollection_[k - 1].getRange().getUpperBound()[0];
      Scalar xMax = 0.0;
      Scalar cdfKm1 = 0.0;
      if (bKm1 < xK)
      {
        xMax = bKm1;
        cdfKm1 = 1.0;
      }
      else
      {
        xMax = xK;
        cdfKm1 = distributionCollection_[k - 1].computeCDF(xMax);
      }
      const Scalar cdfK = distributionCollection_[k].computeCDF(xMax);
      const Scalar logPDFK = distributionCollection_[k].computeLogPDF(point[k]);
      const Scalar factor = computeFactor(k, xMin, xMax);
      sumLogPDF += logPDFK - factor - std::log(cdfKm1 - cdfK);
    } // Partition
  } // Loop over k
  return sumLogPDF;
}


/* Get the CDF of the distribution */
Scalar MaximumEntropyOrderStatisticsDistribution::computeCDF(const Point & point) const
{
  const UnsignedInteger dimension = getDimension();
  if (point.getDimension() != dimension) throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << dimension << ", here dimension=" << point.getDimension();
  // If there is a miracle: we are in the independent case!
  if (hasIndependentCopula())
  {
    Scalar cdf = distributionCollection_[0].computeCDF(point[0]);
    for (UnsignedInteger k = 1; k < dimension; ++k) cdf *= distributionCollection_[k].computeCDF(point[k]);
    return cdf;
  }
  // Indices of the components to take into account in the computation
  Indices toKeep(0);
  Point reducedPoint(0);
  const Point lowerBound(getRange().getLowerBound());
  const Point upperBound(getRange().getUpperBound());
  for (UnsignedInteger k = 0; k < dimension; ++k)
  {
    const Scalar xK = point[k];
    // Early exit if one component is not above its marginal lower bound
    if (xK <= lowerBound[k]) return 0.0;
    // Keep only the indices for which xK is in (xk_min, xk_max) and xK < xKp1
    // Marginalize the others
    const Scalar bound = k < dimension - 1 ? std::min(point[k + 1], upperBound[k]) : upperBound[k];
    if (xK < bound)
    {
      toKeep.add(k);
      reducedPoint.add(xK);
    }
  } // k
  // If all the components are greater than or equal to their marginal upper bound
  if (toKeep.getSize() == 0)
  {
    return 1.0;
  }
  // If one or more components (but not all) are greater than or equal to their marginal upper bound, compute a marginal CDF
  if (toKeep.getSize() < dimension)
  {
    const Scalar cdf = getMarginal(toKeep).computeCDF(reducedPoint);
    return cdf;
  }
  // Else we have to do some work
  // Try to split the work into smaller pieces using potential block-independence
  const UnsignedInteger partitionSize = partition_.getSize();
  if (partitionSize > 0)
  {
    Scalar cdf = 1.0;
    UnsignedInteger firstIndex = 0;
    for (UnsignedInteger i = 0; i <= partitionSize; ++i)
    {
      const UnsignedInteger lastIndex = i < partitionSize ? partition_[i] + 1 : dimension;
      Indices dependentBlockIndices(lastIndex - firstIndex);
      dependentBlockIndices.fill(firstIndex);
      const UnsignedInteger blockSize = dependentBlockIndices.getSize();
      reducedPoint = Point(blockSize);
      for (UnsignedInteger k = 0; k < blockSize; ++k) reducedPoint[k] = point[firstIndex + k];
      // The cdf is obtained by multiplying lower-dimensional cdfs, which are much cheaper to compute than a full multidimensional integration
      const Distribution marginal(getMarginal(dependentBlockIndices));
      const Scalar blockCDF = marginal.computeCDF(reducedPoint);
      cdf *= blockCDF;
      firstIndex = lastIndex;
    }
    return cdf;
  }

  // Here we are in the fully dependent case. Use Gauss-Legendre integration restricted to the support of the copula.
  // We know that for each k, xk is in (xk_min, xk_max) and for k<dim, xk<xkp1
  Scalar cdf = 0.0;

  Point gaussWeights;
  const Point gaussNodes(getGaussNodesAndWeights(gaussWeights));
  // Perform the integration
  // There are N^{d-1} integration points to compute:
  // I = \int_{lb_1}^{x_1}\int_{\max(t_1, lb_2)}^{x_2}\dots\int_{\max(t_{d-2}, lb_{d-1})}^{x_{d-1}}F(x_d|t_1,\dots,t_{d-1})pdf(t_1,\dots,t_{d-1})dt_1\dots dt_{d-1}
  const UnsignedInteger marginalNodesNumber = getIntegrationNodesNumber();
  const UnsignedInteger size = std::pow(static_cast< Scalar > (marginalNodesNumber), static_cast< Scalar > (dimension - 1));
  Indices indices(dimension, 0);
  Indices marginalIndices(dimension - 1);
  marginalIndices.fill();
  const Scalar x = point[dimension - 1];
  const Distribution marginal(getMarginal(marginalIndices));
  for (UnsignedInteger linearIndex = 0; linearIndex < size; ++linearIndex)
  {
    Point node(dimension - 1);
    const Scalar delta0 = 0.5 * (point[0] - lowerBound[0]);
    const UnsignedInteger index0 = indices[0];
    node[0] = lowerBound[0] + delta0 * (1.0 + gaussNodes[index0]);
    Scalar weight = delta0 * gaussWeights[index0];
    for (UnsignedInteger j = 1; j < dimension - 1; ++j)
    {
      const UnsignedInteger indexJ = indices[j];
      const Scalar aJ = std::max(node[j - 1], distributionCollection_[j].getRange().getLowerBound()[0]);
      const Scalar deltaJ = 0.5 * (point[j] - aJ);
      node[j] = aJ + deltaJ * (1.0 + gaussNodes[indexJ]);
      weight *= deltaJ * gaussWeights[indexJ];
    }
    cdf += weight * marginal.computePDF(node) * computeConditionalCDF(x, node);
    /* Update the indices */
    ++indices[0];
    /* Propagate the remainders */
    for (UnsignedInteger j = 0; j < dimension - 2; ++j) indices[j + 1] += (indices[j] == marginalNodesNumber);
    /* Correction of the indices. The last index cannot overflow. */
    for (UnsignedInteger j = 0; j < dimension - 2; ++j) indices[j] = indices[j] % marginalNodesNumber;
  } // Loop over the n-D nodes
  return cdf;
}
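
/* Note (sketch): compared with computeCDFOld() below, this version reduces the integration
 * dimension by one: the last component is handled through the conditional CDF
 * F(x_d | t_1, ..., t_{d-1}), so only N^{d-1} Gauss-Legendre nodes are needed instead of N^d.
 * The indices vector is walked like an odometer: increment the first digit, propagate the
 * carries, then wrap each digit modulo the number of marginal nodes.
 */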

Scalar MaximumEntropyOrderStatisticsDistribution::computeCDFOld(const Point & point) const
{
  const UnsignedInteger dimension = getDimension();
  if (point.getDimension() != dimension) throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << dimension << ", here dimension=" << point.getDimension();
  // If there is a miracle: we are in the independent case!
  if (hasIndependentCopula())
  {
    Scalar cdf = distributionCollection_[0].computeCDF(point[0]);
    for (UnsignedInteger k = 1; k < dimension; ++k) cdf *= distributionCollection_[k].computeCDF(point[k]);
    return cdf;
  }
  // Indices of the components to take into account in the computation
  Indices toKeep(0);
  Point reducedPoint(0);
  const Point lowerBound(getRange().getLowerBound());
  const Point upperBound(getRange().getUpperBound());
  for (UnsignedInteger k = 0; k < dimension; ++k)
  {
    const Scalar xK = point[k];
    // Early exit if one component is not above its marginal lower bound
    if (xK <= lowerBound[k]) return 0.0;
    // Keep only the indices for which xK is in (xk_min, xk_max) and xK < xKp1
    // Marginalize the others
    const Scalar bound = k < dimension - 1 ? std::min(point[k + 1], upperBound[k]) : upperBound[k];
    if (xK < bound)
    {
      toKeep.add(k);
      reducedPoint.add(xK);
    }
  } // k
  // If all the components are greater than or equal to their marginal upper bound
  if (toKeep.getSize() == 0)
  {
    return 1.0;
  }
  // If one or more components (but not all) are greater than or equal to their marginal upper bound, compute a marginal CDF
  if (toKeep.getSize() < dimension)
  {
    const Scalar cdf = getMarginal(toKeep).computeCDF(reducedPoint);
    return cdf;
  }
  // Else we have to do some work
  // Try to split the work into smaller pieces using potential block-independence
  const UnsignedInteger partitionSize = partition_.getSize();
  if (partitionSize > 0)
  {
    Scalar cdf = 1.0;
    UnsignedInteger firstIndex = 0;
    for (UnsignedInteger i = 0; i <= partitionSize; ++i)
    {
      const UnsignedInteger lastIndex = i < partitionSize ? partition_[i] + 1 : dimension;
      Indices dependentBlockIndices(lastIndex - firstIndex);
      dependentBlockIndices.fill(firstIndex);
      const UnsignedInteger blockSize = dependentBlockIndices.getSize();
      reducedPoint = Point(blockSize);
      for (UnsignedInteger k = 0; k < blockSize; ++k) reducedPoint[k] = point[firstIndex + k];
      // The cdf is obtained by multiplying lower-dimensional cdfs, which are much cheaper to compute than a full multidimensional integration
      const Distribution marginal(getMarginal(dependentBlockIndices));
      const Scalar blockCDF = marginal.computeCDF(reducedPoint);
      cdf *= blockCDF;
      firstIndex = lastIndex;
    }
    return cdf;
  }

  // Here we are in the fully dependent case. Use Gauss-Legendre integration restricted to the support of the copula.
  // We know that for each k, xk is in (xk_min, xk_max) and for k<dim, xk<xkp1
  Scalar cdf = 0.0;

  Point gaussWeights;
  const Point gaussNodes(getGaussNodesAndWeights(gaussWeights));
  // Perform the integration
  // There are N^d integration points to compute:
  // I = \int_{lb_1}^{x_1}\int_{\max(t_1, lb_2)}^{x_2}\dots\int_{\max(t_{d-1}, lb_d)}^{x_d}pdf(t_1,\dots,t_d)dt_1\dots dt_d
  const UnsignedInteger marginalNodesNumber = getIntegrationNodesNumber();
  const UnsignedInteger size = std::pow(static_cast< Scalar > (marginalNodesNumber), static_cast< Scalar > (dimension));
  Indices indices(dimension, 0);
  for (UnsignedInteger linearIndex = 0; linearIndex < size; ++linearIndex)
  {
    Point node(dimension);
    const Scalar delta0 = 0.5 * (point[0] - lowerBound[0]);
    const UnsignedInteger index0 = indices[0];
    node[0] = lowerBound[0] + delta0 * (1.0 + gaussNodes[index0]);
    Scalar weight = delta0 * gaussWeights[index0];
    for (UnsignedInteger j = 1; j < dimension; ++j)
    {
      const UnsignedInteger indexJ = indices[j];
      const Scalar aJ = std::max(node[j - 1], distributionCollection_[j].getRange().getLowerBound()[0]);
      const Scalar deltaJ = 0.5 * (point[j] - aJ);
      node[j] = aJ + deltaJ * (1.0 + gaussNodes[indexJ]);
      weight *= deltaJ * gaussWeights[indexJ];
    }
    cdf += weight * computePDF(node);
    /* Update the indices */
    ++indices[0];
    /* Propagate the remainders */
    for (UnsignedInteger j = 0; j < dimension - 1; ++j) indices[j + 1] += (indices[j] == marginalNodesNumber);
    /* Correction of the indices. The last index cannot overflow. */
    for (UnsignedInteger j = 0; j < dimension - 1; ++j) indices[j] = indices[j] % marginalNodesNumber;
  } // Loop over the n-D nodes
  return cdf;
}
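
/* Note: computeCDFOld() integrates the full d-dimensional PDF with a tensorized Gauss-Legendre
 * rule, which is presumably kept as a reference implementation for validating the faster
 * computeCDF() above; both share the same early exits and block-independence splitting.
 */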

/* Compute the PDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
Scalar MaximumEntropyOrderStatisticsDistribution::computeConditionalPDF(const Scalar x,
    const Point & y) const
{
  const UnsignedInteger conditioningDimension = y.getDimension();
  if (conditioningDimension >= getDimension()) throw InvalidArgumentException(HERE) << "Error: cannot compute a conditional PDF with a conditioning point of dimension greater than or equal to the distribution dimension.";

  if (conditioningDimension == 0) return distributionCollection_[0].computePDF(x);
  const UnsignedInteger k = conditioningDimension;
  const Scalar aK = getRange().getLowerBound()[k];
  const Scalar bK = getRange().getUpperBound()[k];
  // If x is outside of the range of the kth marginal, the conditional PDF is zero
  if ((x <= aK) || (x > bK)) return 0.0;
  // The conditional PDF depends only on the last component of the conditioning vector
  const Scalar xKm1 = y[k - 1];
  // If the conditioning component is greater than the argument, the conditional PDF is zero
  if (xKm1 > x) return 0.0;
  // If the conditioning component is outside of the (k-1)th marginal range
  const Scalar aKm1 = getRange().getLowerBound()[k - 1];
  const Scalar bKm1 = getRange().getUpperBound()[k - 1];
  if ((xKm1 <= aKm1) || (xKm1 > bKm1)) return 0.0;
  // Here we have something to do
  // If x is independent of the previous components
  if (partition_.contains(k - 1)) return distributionCollection_[k].computePDF(x);
  // Else the difficult case
  // PDF(x|xKm1) = d(1-exp(-\int_{xKm1}^x\phi(s)ds)) / dx
  //             = -d(-\int_{xKm1}^x\phi(s)ds)/dx * exp(-\int_{xKm1}^x\phi(s)ds)
  //             = \phi(x) * exp(-\int_{xKm1}^x\phi(s)ds)
  return distributionCollection_[k].computePDF(x) * computeExponentialFactor(k, xKm1, x) / (distributionCollection_[k - 1].computeCDF(x) - distributionCollection_[k].computeCDF(x));
}


/* Compute the CDF of Xi | X1, ..., Xi-1. x = Xi, y = (X1,...,Xi-1) */
Scalar MaximumEntropyOrderStatisticsDistribution::computeConditionalCDF(const Scalar x,
    const Point & y) const
{
  const UnsignedInteger conditioningDimension = y.getDimension();
  if (conditioningDimension >= getDimension()) throw InvalidArgumentException(HERE) << "Error: cannot compute a conditional CDF with a conditioning point of dimension greater than or equal to the distribution dimension.";

  if (conditioningDimension == 0)
  {
    const Scalar value = distributionCollection_[0].computeCDF(x);
    return value;
  }
  const UnsignedInteger k = conditioningDimension;
  const Scalar aK = getRange().getLowerBound()[k];
  const Scalar bK = getRange().getUpperBound()[k];
  // If x is less than the lower bound of its associated marginal, the conditional CDF is zero
  if (x <= aK)
  {
    return 0.0;
  }
  // If x is greater than the upper bound of its associated marginal, the conditional CDF is one
  if (x > bK)
  {
    return 1.0;
  }
  // The conditional CDF depends only on the last component of the conditioning vector
  const Scalar xKm1 = y[k - 1];
  // If the conditioning component is greater than the argument, the conditional CDF is zero
  if (xKm1 > x)
  {
    return 0.0;
  }
  // If the conditioning component is outside of the (k-1)th marginal range
  const Scalar aKm1 = getRange().getLowerBound()[k - 1];
  const Scalar bKm1 = getRange().getUpperBound()[k - 1];
  if ((xKm1 <= aKm1) || (xKm1 > bKm1))
  {
    return 0.0;
  }
  // Here we have something to do
  // If x is independent of the previous components
  if (partition_.contains(k - 1))
  {
    const Scalar value = distributionCollection_[k].computeCDF(x);
    return value;
  }
  // Else the difficult case
  // CDF(x|xKm1) = 1 - exp(-\int_{xKm1}^x\phi(s)ds)
  const Scalar factor = computeFactor(k, xKm1, x);
  const Scalar value = -expm1(-factor);
  return value;
}


/* Compute the quantile of Xi | X1, ..., Xi-1, i.e. x such that CDF(x|y) = q with x = Xi, y = (X1,...,Xi-1) */
Scalar MaximumEntropyOrderStatisticsDistribution::computeConditionalQuantile(const Scalar q,
    const Point & y) const
{
  const UnsignedInteger conditioningDimension = y.getDimension();
  if (conditioningDimension >= getDimension()) throw InvalidArgumentException(HERE) << "Error: cannot compute a conditional quantile with a conditioning point of dimension greater than or equal to the distribution dimension.";
  if ((q < 0.0) || (q > 1.0)) throw InvalidArgumentException(HERE) << "Error: cannot compute a conditional quantile for a probability level outside of [0, 1]";

  if (conditioningDimension == 0) return distributionCollection_[0].computeQuantile(q)[0];
  const UnsignedInteger k = conditioningDimension;
  if (partition_.contains(k - 1))
  {
    return distributionCollection_[k].computeQuantile(q)[0];
  }
  // We have to solve:
  // 1 - exp(-\int_{xKm1}^x\phi(s)ds) = q
  // <->
  // Phi(x) - Phi(xKm1) = -log(1 - q)
  // Factor(xKm1, x) = -log(1 - q)
  const Scalar xKm1 = y[k - 1];
  if (q == 0.0) return xKm1;
  Scalar b = getRange().getUpperBound()[k];
  if (q == 1.0) return b;
  const Scalar logU = log1p(-q);
  // First, try Newton iterations:
  // Factor(xKm1, x + dx) = -log(1 - q) = Factor(xKm1, x) + f_k(x) / (F_{k-1}(x) - F_k(x)) dx
  // -> dx = (log(1 - q) + Factor(xKm1, x))(F_k(x) - F_{k-1}(x)) / f_k(x)
  Scalar a = xKm1;
  Scalar x = 0.5 * (a + b);
  UnsignedInteger iteration = 0;
  const UnsignedInteger maximumIteration = ResourceMap::GetAsUnsignedInteger("MaximumEntropyOrderStatisticsDistribution-MaximumQuantileIteration");
  Bool convergence = false;
  do
  {
    ++iteration;
    const Scalar pdfKX = distributionCollection_[k].computePDF(x);
    if (pdfKX == 0.0) break;
    const Scalar cdfKX = distributionCollection_[k].computeCDF(x);
    const Scalar cdfKm1X = distributionCollection_[k - 1].computeCDF(x);
    const Scalar fX = logU + computeFactor(k, xKm1, x);
    // Tighten the bracket [a, b] from the sign of the residual
    if (fX < 0.0)
    {
      a = x;
    }
    else
    {
      b = x;
    }
    const Scalar delta = fX * (cdfKX - cdfKm1X) / pdfKX;
    x += delta;
    convergence = std::abs(delta) < quantileEpsilon_;
  }
  while (!convergence && (iteration < maximumIteration) && (a <= x) && (x <= b));
  if (convergence) return x;
  // In some cases the Newton iteration fails to converge; fall back to a bracketing solver
  const MaximumEntropyOrderStatisticsDistributionWrapper wrapper(*this, k - 1, k, xKm1);
  const Function f(bindMethod<MaximumEntropyOrderStatisticsDistributionWrapper, Point, Point>(wrapper, &MaximumEntropyOrderStatisticsDistributionWrapper::computePartialFactor, 1, 1));
  Brent solver(quantileEpsilon_, cdfEpsilon_, cdfEpsilon_, quantileIterations_);
  return solver.solve(f, -logU, a, b);
}
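
/* Note (sketch): this is a safeguarded Newton iteration. The bracket [a, b] is tightened at each
 * step from the sign of the residual logU + Factor(xKm1, x), so that if Newton stalls, leaves the
 * bracket or hits a zero density, the Brent solver takes over on a still-valid bracket.
 */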

/* Get the i-th marginal distribution */
Distribution MaximumEntropyOrderStatisticsDistribution::getMarginal(const UnsignedInteger i) const
{
  if (i >= getDimension()) throw InvalidArgumentException(HERE) << "The index of a marginal distribution must be in the range [0, dim-1]";
  MaximumEntropyOrderStatisticsDistribution::Implementation marginal(distributionCollection_[i].getImplementation()->clone());
  marginal->setDescription(Description(1, getDescription()[i]));
  return marginal;
}


/* Get the distribution of the marginal distribution corresponding to indices dimensions */
Distribution MaximumEntropyOrderStatisticsDistribution::getMarginal(const Indices & indices) const
{
  const UnsignedInteger size = indices.getSize();
  if (size == 1) return getMarginal(indices[0]);
  return getMarginalAsMaximumEntropyOrderStatisticsDistribution(indices).clone();
}

MaximumEntropyOrderStatisticsDistribution MaximumEntropyOrderStatisticsDistribution::getMarginalAsMaximumEntropyOrderStatisticsDistribution(const Indices & indices) const
{
  const UnsignedInteger size = indices.getSize();
  if (size < 2) throw InvalidArgumentException(HERE) << "indices must be of size at least 2";
  const UnsignedInteger dimension = getDimension();
  if (!indices.check(dimension)) throw InvalidArgumentException(HERE) << "The indices of a marginal distribution must be in the range [0, dim-1] and must be different";
  if (!indices.isIncreasing()) throw InvalidArgumentException(HERE) << "Cannot take the marginal distribution of an order statistics distribution with nonincreasing indices.";
  // Here we know that if the size is equal to the dimension, the indices are [0,...,dimension-1]
  if (size == dimension) return *this;
  DistributionCollection marginalDistributions(size);
  Description marginalDescription(size);
  const Description description(getDescription());
  Collection<PiecewiseHermiteEvaluation> marginalExponentialFactorApproximation(0);
  for (UnsignedInteger i = 0; i < size; ++i)
  {
    const UnsignedInteger j = indices[i];
    marginalDistributions[i] = distributionCollection_[j];
    marginalDescription[i] = description[j];
    if (useApproximation_ && (i > 0))
    {
      const UnsignedInteger jPrec = indices[i - 1];
      if (j == jPrec + 1) marginalExponentialFactorApproximation.add(exponentialFactorApproximation_[j - 1]);
      else
      {
        marginalExponentialFactorApproximation.add(interpolateExponentialFactor(jPrec, j));
      }
    }
  }
  OrderStatisticsMarginalChecker checker(marginalDistributions);
  const Indices marginalPartition(checker.buildPartition());
  MaximumEntropyOrderStatisticsDistribution marginal(marginalDistributions, marginalPartition, useApproximation_, marginalExponentialFactorApproximation, marginalDescription);
  return marginal;
}
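
/* Note (sketch): when the approximation is active, the marginal reuses the Hermite approximations
 * already built for consecutive indices and only re-interpolates the exponential factor across
 * index gaps, which keeps the marginal extraction cheap.
 */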

/* Distribution collection accessor */
void MaximumEntropyOrderStatisticsDistribution::setDistributionCollection(const DistributionCollection & coll,
    const Bool useApprox,
    const Bool checkMarginals)
{
  OrderStatisticsMarginalChecker checker(coll);
  if (checkMarginals) checker.check();
  partition_ = checker.buildPartition();
  setDimension(coll.getSize());

  // Check that the collection is not empty
  const UnsignedInteger size = coll.getSize();
  if (size == 0) throw InvalidArgumentException(HERE) << "Collection of distributions is empty";
  Description description(size);
  Point lowerBound(size);
  Point upperBound(size);
  Interval::BoolCollection finiteLowerBound(size);
  Interval::BoolCollection finiteUpperBound(size);
  // First, check that all the marginal distributions are of dimension 1
  isParallel_ = true;
  for (UnsignedInteger i = 0; i < size; ++i)
  {
    if (coll[i].getDimension() != 1) throw InvalidArgumentException(HERE) << "The marginal distribution " << i << " is of dimension " << coll[i].getDimension() << ", which is different from 1.";
    isParallel_ = isParallel_ && coll[i].getImplementation()->isParallel();
    const Interval marginalRange(coll[i].getRange());
    lowerBound[i] = marginalRange.getLowerBound()[0];
    upperBound[i] = marginalRange.getUpperBound()[0];
    finiteLowerBound[i] = marginalRange.getFiniteLowerBound()[0];
    finiteUpperBound[i] = marginalRange.getFiniteUpperBound()[0];
    // The description of the distribution is built first by using the marginal description,
    // then by using the marginal name if the description is empty, which should never occur
    const String marginalDescription(coll[i].getDescription()[0]);
    if (marginalDescription.size() > 0) description[i] = marginalDescription;
    else
    {
      LOGINFO(OSS() << "Warning: using the name of the marginal " << i << " instead of its description to build the description of the distribution, because the marginal description is empty.");
      const String marginalName(coll[i].getName());
      description[i] = marginalName;
    }
  }

  // Everything is ok, store the collection
  distributionCollection_ = coll;
  isAlreadyComputedMean_ = false;
  isAlreadyComputedCovariance_ = false;
  setDescription(description);
  setRange(Interval(lowerBound, upperBound, finiteLowerBound, finiteUpperBound));
  // We must set useApproximation_ to false even if we use the approximation, as we need to perform
  // exact computations to build the approximation. The flag is set to the correct value by
  // interpolateExponentialFactors()
  useApproximation_ = false;
  if (useApprox) interpolateExponentialFactors();
}


/* Parameters value and description accessor */
MaximumEntropyOrderStatisticsDistribution::PointWithDescriptionCollection MaximumEntropyOrderStatisticsDistribution::getParametersCollection() const
{
  const UnsignedInteger dimension = getDimension();
  PointWithDescriptionCollection parameters(dimension);
  const Description description(getDescription());
  // First put the marginal parameters
  for (UnsignedInteger marginalIndex = 0; marginalIndex < dimension; ++marginalIndex)
  {
    // Each marginal distribution must output a collection of parameters of size 1, even if it contains an empty Point
    const PointWithDescriptionCollection marginalParameters(distributionCollection_[marginalIndex].getParametersCollection());
    PointWithDescription point(marginalParameters[0]);
    Description marginalParametersDescription(point.getDescription());
    // Here we must add a unique prefix to the marginal parameters description in order to disambiguate the parameters of different marginals sharing the same description
    for (UnsignedInteger i = 0; i < point.getDimension(); ++i) marginalParametersDescription[i] = (OSS() << marginalParametersDescription[i] << "_marginal_" << marginalIndex);
    point.setDescription(marginalParametersDescription);
    point.setName(description[marginalIndex]);
    parameters[marginalIndex] = point;
  } // marginalIndex
  return parameters;
} // getParametersCollection


void MaximumEntropyOrderStatisticsDistribution::setParametersCollection(const PointCollection & parametersCollection)
{
  const UnsignedInteger dimension = getDimension();
  if (parametersCollection.getSize() != dimension) throw InvalidArgumentException(HERE) << "The collection is too small (" << parametersCollection.getSize() << "). Expected (" << dimension << ")";

  // Set the marginal parameters
  for (UnsignedInteger marginalIndex = 0; marginalIndex < dimension; ++marginalIndex)
    distributionCollection_[marginalIndex].setParameter(parametersCollection[marginalIndex]);
}


MaximumEntropyOrderStatisticsDistribution::DistributionCollection MaximumEntropyOrderStatisticsDistribution::getDistributionCollection() const
{
  return distributionCollection_;
}

/* Get the copula of the distribution */
Distribution MaximumEntropyOrderStatisticsDistribution::getCopula() const
{
  return new MaximumEntropyOrderStatisticsCopula(*this);
}

/* Flag to tell if we use the approximation for the exponential term */
void MaximumEntropyOrderStatisticsDistribution::useApproximation(const bool flag)
{
  if (flag != useApproximation_)
  {
    useApproximation_ = flag;
    if (flag) interpolateExponentialFactors();
  }
}

/* Tell if the distribution has an elliptical copula */
Bool MaximumEntropyOrderStatisticsDistribution::hasEllipticalCopula() const
{
  return hasIndependentCopula();
}

/* Tell if the distribution has an independent copula */
Bool MaximumEntropyOrderStatisticsDistribution::hasIndependentCopula() const
{
  return partition_.getSize() == (getDimension() - 1);
}
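
/* Note (sketch, inferred from the uses of partition_ above): partition_ collects the indices k-1
 * for which X_k is independent of the previous components, so the copula is the independent one
 * exactly when all dimension-1 adjacent pairs are in this situation.
 */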

/* Method save() stores the object through the StorageManager */
void MaximumEntropyOrderStatisticsDistribution::save(Advocate & adv) const
{
  ContinuousDistribution::save(adv);
  adv.saveAttribute("distributionCollection_", distributionCollection_);
}

/* Method load() reloads the object from the StorageManager */
void MaximumEntropyOrderStatisticsDistribution::load(Advocate & adv)
{
  ContinuousDistribution::load(adv);
  DistributionPersistentCollection coll;
  adv.loadAttribute("distributionCollection_", coll);
  setDistributionCollection(coll);
}


END_NAMESPACE_OPENTURNS