//                                               -*- C++ -*-
/**
 *  @brief Abstract top-level class for all RandomMixtures
 *
 *  Copyright 2005-2021 Airbus-EDF-IMACS-ONERA-Phimeca
 *
 *  This library is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with this library.  If not, see <http://www.gnu.org/licenses/>.
 *
 */
#include <cstdlib>
#include <cmath>
#include <iomanip>
#include <algorithm>

#include "openturns/RandomMixture.hxx"
#include "openturns/SpecFunc.hxx"
#include "openturns/PersistentObjectFactory.hxx"
#include "openturns/MethodBoundEvaluation.hxx"
#include "openturns/Interval.hxx"
#include "openturns/Tuples.hxx"
#include "openturns/Function.hxx"
#include "openturns/Log.hxx"
#include "openturns/Triangular.hxx"
#include "openturns/Trapezoidal.hxx"
#include "openturns/TruncatedDistribution.hxx"
#include "openturns/Uniform.hxx"
#include "openturns/Exponential.hxx"
#include "openturns/Gamma.hxx"
#include "openturns/Mixture.hxx"
#include "openturns/SmoothedUniform.hxx"
#include "openturns/Dirac.hxx"
#include "openturns/Bernoulli.hxx"
#include "openturns/Binomial.hxx"
#include "openturns/Poisson.hxx"
#include "openturns/ComplexTensor.hxx"
#include "openturns/FFT.hxx"
#include "openturns/TBB.hxx"
#include "openturns/OSS.hxx"
#include "openturns/SobolSequence.hxx"
#include "openturns/Os.hxx"

BEGIN_NAMESPACE_OPENTURNS

TEMPLATE_CLASSNAMEINIT(PersistentCollection<Distribution>)
static const Factory<PersistentCollection<Distribution> > Factory_PersistentCollection_Distribution;


typedef Collection<Distribution> DistributionCollection;
typedef Collection<Point> PointCollection;
typedef Collection<Complex> ComplexCollection;


CLASSNAMEINIT(RandomMixture)

static const Factory<RandomMixture> Factory_RandomMixture;

/* Default constructor */
RandomMixture::RandomMixture()
  : DistributionImplementation()
  , distributionCollection_()
  , constant_(Point(1))
  , weights_()
  , inverseWeights_()
  , detWeightsInverse_()
  , fftAlgorithm_()
  , isAnalytical_(false)
  , positionIndicator_(0.0)
  , isAlreadyComputedPositionIndicator_(false)
  , dispersionIndicator_(0.0)
  , isAlreadyComputedDispersionIndicator_(false)
  , blockMin_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMin" ))
  , blockMax_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMax" ))
  , maxSize_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultMaxSize"  ))
  , storedSize_(0)
  , characteristicValuesCache_(0)
  , alpha_(ResourceMap::GetAsScalar( "RandomMixture-DefaultAlpha" ))
  , beta_(ResourceMap::GetAsScalar( "RandomMixture-DefaultBeta" ))
  , pdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultPDFEpsilon" ))
  , cdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultCDFEpsilon" ))
  , equivalentNormal_()
  , algo_()
{
  setName("RandomMixture");
  setDimension(1);
  DistributionCollection coll(1);
  setDistributionCollectionAndWeights(coll, Matrix(1, coll.getSize(), Point(coll.getSize(), 1.0)), ResourceMap::GetAsBool("RandomMixture-SimplifyAtoms"));
  cdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultCDFEpsilon");
  pdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultPDFEpsilon");
}


/* Parameter constructor - 1D */
RandomMixture::RandomMixture(const DistributionCollection & coll,
                             const Scalar constant)
  : DistributionImplementation()
  , distributionCollection_()
  , constant_(Point(1, constant))
  , weights_()
  , inverseWeights_()
  , detWeightsInverse_()
  , fftAlgorithm_()
  , isAnalytical_(false)
  , positionIndicator_(0.0)
  , isAlreadyComputedPositionIndicator_(false)
  , dispersionIndicator_(0.0)
  , isAlreadyComputedDispersionIndicator_(false)
  , blockMin_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMin" ))
  , blockMax_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMax" ))
  , maxSize_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultMaxSize"  ))
  , storedSize_(0)
  , characteristicValuesCache_(0)
  , alpha_(ResourceMap::GetAsScalar( "RandomMixture-DefaultAlpha" ))
  , beta_(ResourceMap::GetAsScalar( "RandomMixture-DefaultBeta" ))
  , pdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultPDFEpsilon" ))
  , cdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultCDFEpsilon" ))
  , equivalentNormal_()
  , algo_()
{
  setName("RandomMixture");
  setDimension(1);
  // We cannot set distributionCollection_ in the initializer list of the
  // constructor because we must first check that the collection is valid
  // (i.e., that all the distributions of the collection have the same
  // dimension). We do this by calling the setDistributionCollectionAndWeights()
  // method, which does it for us. This call also sets the range.
  setDistributionCollectionAndWeights(coll, Matrix(1, coll.getSize(), Point(coll.getSize(), 1.0)), ResourceMap::GetAsBool("RandomMixture-SimplifyAtoms"));
  cdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultCDFEpsilon");
  pdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultPDFEpsilon");
}

/* Parameter constructor - 1D */
RandomMixture::RandomMixture(const DistributionCollection & coll,
                             const Point & weights,
                             const Scalar constant)
  : DistributionImplementation()
  , distributionCollection_()
  , constant_(Point(1, constant))
  , weights_()
  , inverseWeights_()
  , detWeightsInverse_()
  , fftAlgorithm_()
  , isAnalytical_(false)
  , positionIndicator_(0.0)
  , isAlreadyComputedPositionIndicator_(false)
  , dispersionIndicator_(0.0)
  , isAlreadyComputedDispersionIndicator_(false)
  , blockMin_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMin" ))
  , blockMax_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMax" ))
  , maxSize_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultMaxSize"  ))
  , storedSize_(0)
  , characteristicValuesCache_(0)
  , alpha_(ResourceMap::GetAsScalar( "RandomMixture-DefaultAlpha" ))
  , beta_(ResourceMap::GetAsScalar( "RandomMixture-DefaultBeta" ))
  , pdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultPDFEpsilon" ))
  , cdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultCDFEpsilon" ))
  , equivalentNormal_()
  , algo_()
{
  setName("RandomMixture");
  setDimension(1);
  // We cannot set distributionCollection_ in the initializer list of the
  // constructor because we must first check that the collection is valid
  // (i.e., that all the distributions of the collection have the same
  // dimension). We do this by calling the setDistributionCollectionAndWeights()
  // method, which does it for us.
  if (weights.getDimension() != coll.getSize()) throw InvalidArgumentException(HERE) << "Error: the weights collection must have the same size as the distribution collection";
  // This call also sets the range.
  setDistributionCollectionAndWeights(coll, Matrix(1, coll.getSize(), weights), ResourceMap::GetAsBool("RandomMixture-SimplifyAtoms"));
  cdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultCDFEpsilon");
  pdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultPDFEpsilon");
}
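
/* Illustrative sketch (not part of the library build): this constructor encodes
   the 1D affine combination Z = constant + sum_i weights[i] * X_i. Assuming
   atoms X1 ~ Normal(0, 1) and X2 ~ Uniform(-1, 1), Z = 2 + 3 * X1 - X2 could
   be built as follows (the names below are only an example):

     Collection<Distribution> coll;
     coll.add(Normal(0.0, 1.0));
     coll.add(Uniform(-1.0, 1.0));
     Point weights(2);
     weights[0] = 3.0;
     weights[1] = -1.0;
     const RandomMixture Z(coll, weights, 2.0);
*/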

/* Parameter constructor - nD */
RandomMixture::RandomMixture(const DistributionCollection & coll,
                             const Matrix & weights,
                             const Point & constant)
  : DistributionImplementation()
  , distributionCollection_()
  , constant_(constant)
  , weights_()
  , inverseWeights_()
  , detWeightsInverse_()
  , fftAlgorithm_()
  , isAnalytical_(false)
  , positionIndicator_(0.0)
  , isAlreadyComputedPositionIndicator_(false)
  , dispersionIndicator_(0.0)
  , isAlreadyComputedDispersionIndicator_(false)
  , blockMin_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMin" ))
  , blockMax_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMax" ))
  , maxSize_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultMaxSize"  ))
  , storedSize_(0)
  , characteristicValuesCache_(0)
  , alpha_(ResourceMap::GetAsScalar( "RandomMixture-DefaultAlpha" ))
  , beta_(ResourceMap::GetAsScalar( "RandomMixture-DefaultBeta" ))
  , pdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultPDFEpsilon" ))
  , cdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultCDFEpsilon" ))
  , equivalentNormal_()
  , algo_()
{
  setName("RandomMixture");
  if (constant.getSize() > 3) throw InvalidDimensionException(HERE) << "RandomMixture is only possible for dimension 1, 2 or 3";
  setDimension(constant.getSize());
  // We cannot set distributionCollection_ in the initializer list of the
  // constructor because we must first check that the collection is valid
  // (i.e., that all the distributions of the collection have the same
  // dimension). We do this by calling the setDistributionCollectionAndWeights()
  // method, which does it for us.
  if (weights.getNbColumns() != coll.getSize()) throw InvalidArgumentException(HERE) << "Error: the weight matrix must have the same number of columns as the size of the distribution collection";
  if (weights.getNbRows() != constant.getSize()) throw InvalidArgumentException(HERE) << "Error: the weight matrix must have the same number of rows as the distribution dimension";
  // This call also sets the range.
  setDistributionCollectionAndWeights(coll, weights, ResourceMap::GetAsBool("RandomMixture-SimplifyAtoms"));
  cdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultCDFEpsilon");
  pdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultPDFEpsilon");
}

/* Parameter constructor - nD */
RandomMixture::RandomMixture(const DistributionCollection & coll,
                             const Matrix & weights)
  : DistributionImplementation()
  , distributionCollection_()
  , constant_()
  , weights_()
  , inverseWeights_()
  , detWeightsInverse_()
  , fftAlgorithm_()
  , isAnalytical_(false)
  , positionIndicator_(0.0)
  , isAlreadyComputedPositionIndicator_(false)
  , dispersionIndicator_(0.0)
  , isAlreadyComputedDispersionIndicator_(false)
  , blockMin_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMin" ))
  , blockMax_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMax" ))
  , maxSize_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultMaxSize"  ))
  , storedSize_(0)
  , characteristicValuesCache_(0)
  , alpha_(ResourceMap::GetAsScalar( "RandomMixture-DefaultAlpha" ))
  , beta_(ResourceMap::GetAsScalar( "RandomMixture-DefaultBeta" ))
  , pdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultPDFEpsilon" ))
  , cdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultCDFEpsilon" ))
  , equivalentNormal_()
  , algo_()
{
  setName("RandomMixture");
  const UnsignedInteger dimension = weights.getNbRows();
  if (dimension > 3) throw InvalidDimensionException(HERE) << "RandomMixture is only possible for dimension 1, 2 or 3";
  constant_ = Point(dimension, 0.0);
  setDimension(dimension);
  if (weights.getNbColumns() != coll.getSize()) throw InvalidArgumentException(HERE) << "Error: the weight matrix must have the same number of columns as the size of the distribution collection";
  setDistributionCollectionAndWeights(coll, weights, ResourceMap::GetAsBool("RandomMixture-SimplifyAtoms"));
  cdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultCDFEpsilon");
  pdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultPDFEpsilon");
}
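
/* Illustrative sketch (not part of the library build): in the nD case the
   weight matrix has one row per output dimension and one column per atom, so
   with atoms (X1, X2, X3) the matrix below encodes the 2D vector
   Z = (X1 + X2, X2 - X3):

     Matrix weights(2, 3);
     weights(0, 0) = 1.0; weights(0, 1) = 1.0; weights(0, 2) =  0.0;
     weights(1, 0) = 0.0; weights(1, 1) = 1.0; weights(1, 2) = -1.0;
     const RandomMixture Z(coll, weights);
*/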

/* Parameter constructor - nD */
RandomMixture::RandomMixture(const DistributionCollection & coll,
                             const Sample & weights,
                             const Point & constant)
  : DistributionImplementation()
  , distributionCollection_()
  , constant_(constant)
  , weights_()
  , inverseWeights_()
  , detWeightsInverse_()
  , fftAlgorithm_()
  , isAnalytical_(false)
  , positionIndicator_(0.0)
  , isAlreadyComputedPositionIndicator_(false)
  , dispersionIndicator_(0.0)
  , isAlreadyComputedDispersionIndicator_(false)
  , blockMin_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMin" ))
  , blockMax_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMax" ))
  , maxSize_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultMaxSize"  ))
  , storedSize_(0)
  , characteristicValuesCache_(0)
  , alpha_(ResourceMap::GetAsScalar( "RandomMixture-DefaultAlpha" ))
  , beta_(ResourceMap::GetAsScalar( "RandomMixture-DefaultBeta" ))
  , pdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultPDFEpsilon" ))
  , cdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultCDFEpsilon" ))
  , equivalentNormal_()
  , algo_()
{
  setName("RandomMixture");
  const UnsignedInteger dimension = constant.getSize();
  if (dimension > 3) throw InvalidDimensionException(HERE) << "RandomMixture is only possible for dimension 1, 2 or 3";
  setDimension(dimension);
  if (weights.getSize() != coll.getSize()) throw InvalidArgumentException(HERE) << "Error: the weight sample must have the same size as the distribution collection";
  if (weights.getDimension() != constant.getDimension()) throw InvalidArgumentException(HERE) << "Error: the weight sample must have the same dimension as the distribution dimension";
  setDistributionCollectionAndWeights(coll, Matrix(weights.getDimension(), weights.getSize(), weights.getImplementation()->getData()), ResourceMap::GetAsBool("RandomMixture-SimplifyAtoms"));
  cdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultCDFEpsilon");
  pdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultPDFEpsilon");
}

/* Parameter constructor - nD */
RandomMixture::RandomMixture(const DistributionCollection & coll,
                             const Sample & weights)
  : DistributionImplementation()
  , distributionCollection_()
  , weights_()
  , inverseWeights_()
  , detWeightsInverse_()
  , fftAlgorithm_()
  , isAnalytical_(false)
  , positionIndicator_(0.0)
  , isAlreadyComputedPositionIndicator_(false)
  , dispersionIndicator_(0.0)
  , isAlreadyComputedDispersionIndicator_(false)
  , blockMin_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMin" ))
  , blockMax_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultBlockMax" ))
  , maxSize_(ResourceMap::GetAsUnsignedInteger( "RandomMixture-DefaultMaxSize"  ))
  , storedSize_(0)
  , characteristicValuesCache_(0)
  , alpha_(ResourceMap::GetAsScalar( "RandomMixture-DefaultAlpha" ))
  , beta_(ResourceMap::GetAsScalar( "RandomMixture-DefaultBeta" ))
  , pdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultPDFEpsilon" ))
  , cdfPrecision_(ResourceMap::GetAsScalar( "RandomMixture-DefaultCDFEpsilon" ))
  , equivalentNormal_()
  , algo_()
{
  setName("RandomMixture");
  const UnsignedInteger dimension = weights.getDimension();
  if (dimension > 3) throw InvalidDimensionException(HERE) << "RandomMixture is only possible for dimension 1, 2 or 3";
  constant_ = Point(dimension, 0.0);
  setDimension(dimension);
  if (weights.getSize() != coll.getSize()) throw InvalidArgumentException(HERE) << "Error: the weight sample must have the same size as the distribution collection";
  setDistributionCollectionAndWeights(coll, Matrix(weights.getDimension(), weights.getSize(), weights.getImplementation()->getData()), ResourceMap::GetAsBool("RandomMixture-SimplifyAtoms"));
  cdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultCDFEpsilon");
  pdfEpsilon_ = ResourceMap::GetAsScalar("RandomMixture-DefaultPDFEpsilon");
}

/* Compute the numerical range of the distribution given the parameter values */
void RandomMixture::computeRange()
{
  const UnsignedInteger size = distributionCollection_.getSize();
  // First, compute the *exact* range. It will be used to clip the asymptotic range if Poisson's formula is used (i.e., if the collection has a size greater than the dimension)
  const UnsignedInteger dimension = getDimension();
  if (dimension == 1 && size == 1)
  {
    const Scalar w = weights_(0, 0);
    const Scalar c = constant_[0];
    const Scalar a = distributionCollection_[0].getRange().getLowerBound()[0];
    const Scalar b = distributionCollection_[0].getRange().getUpperBound()[0];
    const Interval::BoolCollection aFinite = distributionCollection_[0].getRange().getFiniteLowerBound();
    const Interval::BoolCollection bFinite = distributionCollection_[0].getRange().getFiniteUpperBound();
    if (w > 0.0)
    {
      setRange(Interval(Point(1, c + w * a), Point(1, c + w * b), aFinite, bFinite));
      return;
    }
    setRange(Interval(Point(1, c + w * b), Point(1, c + w * a), bFinite, aFinite));
    return;
  } // dimension == 1 && size == 1
  Interval::BoolCollection finiteLowerBound(dimension);
  Interval::BoolCollection finiteUpperBound(dimension);
  Point lowerBound(getDimension());
  Point upperBound(getDimension());
  for (UnsignedInteger j = 0; j < dimension; ++j)
  {
    Interval range(constant_[j], constant_[j]);
    for (UnsignedInteger i = 0; i < size; ++i)
      range += distributionCollection_[i].getRange() * weights_(j, i);
    lowerBound[j] = range.getLowerBound()[0];
    upperBound[j] = range.getUpperBound()[0];
    finiteLowerBound[j] = range.getFiniteLowerBound()[0];
    finiteUpperBound[j] = range.getFiniteUpperBound()[0];
  } // j
  const Interval range(lowerBound, upperBound, finiteLowerBound, finiteUpperBound);
  if (size <= dimension)
  {
    setRange(range);
    return;
  } // Analytical case
  if (dimension == 1)
  {
    const Point m(1, getPositionIndicator());
    const Point s(1, getDispersionIndicator());
    setRange(range.intersect(Interval(m - s * beta_, m + s * beta_)));
    return;
  } // dimension == 1
  else
  {
    Point m(constant_);
    Point s(getDimension());
    for (UnsignedInteger j = 0; j < dimension; ++j)
    {
      for (UnsignedInteger i = 0; i < size; ++i)
      {
        const Scalar mI = distributionCollection_[i].getPositionIndicator();
        m[j] += weights_(j, i) * mI;
        const Scalar sI = distributionCollection_[i].getDispersionIndicator();
        s[j] += std::pow(weights_(j, i) * sI, 2.0);
      }
    }
    for (UnsignedInteger j = 0; j < dimension; ++j) s[j] = std::sqrt(s[j]);
    setRange(range.intersect(Interval(m - s * beta_, m + s * beta_)));
    return;
  } // dimension > 1
}
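
/* Worked example (illustrative) of the clipping above: for Z = X1 + X2 with
   X1 ~ Normal(0, 1) and X2 ~ Uniform(0, 1), the exact range is unbounded, so
   the computed range reduces to the asymptotic interval
   [m - beta_ * s, m + beta_ * s], where m = 0.5 is the position indicator and
   s = sqrt(1 + 1/12) the dispersion indicator of Z. */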

/* Comparison operator */
Bool RandomMixture::operator ==(const RandomMixture & other) const
{
  if (this == &other) return true;
  return (distributionCollection_ == other.distributionCollection_) && (constant_ == other.constant_);
}

Bool RandomMixture::equals(const DistributionImplementation & other) const
{
  const RandomMixture* p_other = dynamic_cast<const RandomMixture*>(&other);
  return p_other && (*this == *p_other);
}

/* String converter */
String RandomMixture::__repr__() const
{
  OSS oss(true);
  oss << "class=" << RandomMixture::GetClassName()
      << " name=" << getName()
      << " distribution collection=" << distributionCollection_
      << " weights=" << weights_
      << " constant=" << constant_;
  return oss;
}

/* String converter */
String RandomMixture::__str__(const String & offset) const
{
  OSS oss(false);
  oss << getClassName() << "(";
  const UnsignedInteger size = distributionCollection_.getSize();
  if (dimension_ > 1) oss << "\n";
  // Print marginal by marginal
  for (UnsignedInteger marginal = 0; marginal < dimension_; ++ marginal)
  {
    // If marginal > 0, alignment
    if (constant_[marginal] != 0.0) oss << constant_[marginal];
    for (UnsignedInteger i = 0; i < size; ++i)
    {
      const Scalar w = weights_(marginal, i);
      if ((constant_[marginal] != 0.0) || (i > 0))
      {
        if (w > 0.0) oss << " + ";
        else oss << " - ";
      }
      else if (w < 0.0) oss << "-";
      const String coeff(OSS() << std::abs(w));
      if (coeff != "1") oss << std::abs(w) << " * ";
      oss << distributionCollection_[i];
    }
    // Skip to a new line
    if (dimension_ > 1) oss << Os::GetEndOfLine() << offset;
  }
  oss << ")";
  return oss;
}
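
/* Illustrative sketch of the rendered string (assuming the usual one-line
   __str__ of the atoms): a 1D mixture with constant 2, weights (3, -1) and
   atoms Normal(0, 1) and Uniform(-1, 1) would print as
   "RandomMixture(2 + 3 * Normal(mu = 0, sigma = 1) - Uniform(a = -1, b = 1))". */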

/* Weights accessor */
Matrix RandomMixture::getWeights() const
{
  return weights_;
}


/* Distribution collection accessor */
void RandomMixture::setDistributionCollectionAndWeights(const DistributionCollection & coll,
    const Matrix & weights,
    const Bool simplifyAtoms)
{
  weights_ = weights;
  // The size will be updated as the collection undergoes the several treatments below
  UnsignedInteger size = coll.getSize();
  if (size == 0) throw InvalidArgumentException(HERE) << "Error: cannot build a RandomMixture based on an empty distribution collection.";
  // No simplification in the analytical case
  const UnsignedInteger dimension = getDimension();
  if (size == dimension && !simplifyAtoms)
  {
    isAnalytical_ = true;
    if (dimension == 1)
    {
      detWeightsInverse_ = 1.0 / weights_(0, 0);
      inverseWeights_ = SquareMatrix(1);
      inverseWeights_(0, 0) = detWeightsInverse_;
    }
    else
    {
      inverseWeights_ = weights_.solveLinearSystem(IdentityMatrix(dimension));
      detWeightsInverse_ = inverseWeights_.getImplementation().get()->computeDeterminant();
    }
    Bool isParallel = coll[0].getImplementation()->isParallel();
    for (UnsignedInteger i = 1; i < dimension; ++i)
      isParallel = isParallel && coll[i].getImplementation()->isParallel();
    setParallel(isParallel);
    distributionCollection_ = coll;
    isAlreadyComputedMean_ = false;
    isAlreadyComputedCovariance_ = false;
    computeRange();
    // No need to precompute Mean, Covariance, PositionIndicator, DispersionIndicator, ReferenceBandwidth, EquivalentNormal
    return;
  }
  // In the 1D case the collection's size may change:
  // reducing the collection to a single atom makes later computations faster.
  // First, flatten all the RandomMixture atoms
  DistributionCollection atomCandidates(0);
  // The weights are stored as a collection of scalars, to be read by blocks of size dimension.
  Sample weightCandidates(0, dimension);
  LOGDEBUG("Flatten RandomMixture atoms in the current RandomMixture");
  for (UnsignedInteger i = 0; i < size; ++i)
  {
    const Distribution atom(coll[i]);
    const Matrix w = weights_.getColumn(i);
    // Skip atoms with null coefficient
    if (w.computeGram()(0, 0) == 0.0) continue;
    const String atomKind(atom.getImplementation()->getClassName());
    if (atom.getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: a RandomMixture cannot be built from a collection of distributions of dimension not equal to 1, here distribution " << i << " has a dimension=" << atom.getDimension();
    if (atomKind == "RandomMixture" || atomKind == "SmoothedUniform")
    {
      // Here we know that the atom is 1D, so we merge a 1D RandomMixture
      // Get the weight of the atom
      // Cast the atom into a RandomMixture
      const RandomMixture * mixture(dynamic_cast< const RandomMixture * >(atom.getImplementation().get()));
      // Aggregate the constant
      constant_ += w * mixture->constant_;
      // Aggregate the weights
      const Matrix localWeights(w * mixture->weights_);
      SampleImplementation localWeightsAsSample(localWeights.getNbColumns(), dimension);
      localWeightsAsSample.setData(*localWeights.getImplementation());
      weightCandidates.add(localWeightsAsSample);
      // Aggregate the atoms
      atomCandidates.add(mixture->getDistributionCollection());
    } // atom is a RandomMixture
    else if (atomKind == "TruncatedDistribution")
    {
      const TruncatedDistribution * truncatedDistribution(dynamic_cast< const TruncatedDistribution * >(atom.getImplementation().get()));
      weightCandidates.add(*w.getImplementation());
      atomCandidates.add(truncatedDistribution->getSimplifiedVersion());
    }
    else
    {
      weightCandidates.add(*w.getImplementation());
      atomCandidates.add(atom);
    } // atom is not a RandomMixture
  } // Flatten the atoms of RandomMixture type
  // Update the size
  size = atomCandidates.getSize();
  if (simplifyAtoms)
  {
    // Second, split the atoms between the discrete ones, the continuous ones and the others
    // The Dirac atoms are optimized during this step
    DistributionCollection continuousAtoms(0);
    Sample continuousWeights(0, dimension);
    DistributionCollection discreteAtoms(0);
    Sample discreteWeights(0, dimension);
    DistributionCollection otherAtoms(0);
    Sample otherWeights(0, dimension);
    LOGDEBUG("Sort the atoms into continuous, discrete and others. Also merge Dirac atoms into the constant.");
    for (UnsignedInteger i = 0; i < size; ++i)
    {
      const Distribution atom(atomCandidates[i]);
      if (atom.getImplementation()->getClassName() == "Dirac")
      {
        constant_ += Point(weightCandidates[i]) * atom.getParameter()[0];
      }
      else if (atom.isContinuous())
      {
        continuousAtoms.add(atom);
        continuousWeights.add(weightCandidates[i]);
      }
      else if (atom.isDiscrete())
      {
        discreteAtoms.add(atom);
        discreteWeights.add(weightCandidates[i]);
      }
      else
      {
        otherAtoms.add(atom);
        otherWeights.add(weightCandidates[i]);
      }
    } // Split atoms and optimize Dirac
    LOGDEBUG(OSS() << "Continuous atoms=" << continuousAtoms.__str__() << ", discrete atoms=" << discreteAtoms.__str__() << ", other atoms=" << otherAtoms.__str__());
    // Third, merge the atoms as much as possible. Most of the optimizations assume a 1D RandomMixture.
    //
    // In the case of a nD RandomMixture with n>1:
    //
    // + The discrete atoms can be merged into a unique discrete nD UserDefined with a support of reasonable size
    // + There is no continuous atom, or a unique continuous atom, or all the continuous atoms are Normal
    // + There is no 'other' atom.
    // Depending on the continuous atoms, one gets:
    // + A multivariate UserDefined if no continuous atom
    // + A multivariate Mixture if a unique continuous atom or only continuous Normal atoms (merged into a unique multivariate Normal)
    // -> all these special cases lead to an analytical expression of the RandomMixture
    // -> these simplifications will be implemented at a later stage
    //
    // In the case of a 1D mixture:
    //
    // Continuous optimizations:
    // + The Uniform atoms can be merged two by two into a Trapezoidal or Triangular (see the worked example after this comment block)
    // + The Exponential, ChiSquare and Gamma atoms can be merged into a Gamma atom if their scale parameters are all equal after standardization (so we must group these atoms by the rate/weight ratio). The possible translation has to be accumulated into the constant.
    // + The Normal atoms can be merged into a unique Normal
    // + If there remain a Uniform atom and a Normal atom, they can be merged into a SmoothedUniform atom
    //
    // Discrete optimizations:
    // + The Bernoulli and Binomial atoms can be merged into a unique Binomial as soon as they share the same value of p and the same weight
    // + The Poisson atoms can be merged into a unique Poisson as soon as they share the same weight
    // + Poisson atoms with opposite weights could be merged into a Skellam atom, but it is not clear if it is worth the effort...
    // + The Discrete atoms can be grouped into Discrete atoms of larger support, if these merged atoms have a support of reasonable size.
    //
    // Mixed optimizations:
    //
    // + A continuous atom can be merged with a discrete atom to form a Mixture. This simplification can be done for each (continuous, discrete) pair. It is not clear whether some pairings are preferable to others.
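    // Worked example (illustrative) of the Uniform + Uniform merge below:
    // with X1 ~ Uniform(0, 1) and X2 ~ Uniform(0, 2), both with unit weight,
    // one gets (a0, b0) = (0, 2) for the second atom and (a1, b1) = (0, 1)
    // for the pending one, hence alpha = 0, delta = 3, center = 1.5 and
    // halfWidth = 0.5, so the pair is replaced by the single atom
    // Trapezoidal(0, 1, 2, 3) carrying a unit weight.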
    distributionCollection_ = DistributionCollection(0);
    Sample reducedWeights(0, dimension);
    if (dimension == 1)
    {
      // Optimization of continuous atoms
      Bool hasNormalAtom = false;
      Scalar aggregatedMean = 0.0;
      Scalar aggregatedVariance = 0.0;
      Bool hasPendingUniform = false;
      Uniform pendingUniform;
      // This map will store all the Exponential, ChiSquare and Gamma atoms
      // For each value of lambda/weight it stores the cumulated k parameter
      std::map<Scalar, Scalar> gammaMap;
      for (UnsignedInteger i = 0; i < continuousAtoms.getSize(); ++i)
      {
        const Scalar w = continuousWeights(0, i);
        const Distribution atom(continuousAtoms[i]);
        const String atomKind(atom.getImplementation()->getClassName());
        // Knowledge-based optimization
        // First for 1D atoms
        if (atomKind == "Uniform")
        {
          const Scalar low = atom.getRange().getLowerBound()[0];
          const Scalar high = atom.getRange().getUpperBound()[0];
          Scalar a0 = w * low;
          Scalar b0 = w * high;
          if (a0 > b0) std::swap(a0, b0);
          // If there is already a Uniform atom, merge it into a symmetrical Trapezoidal
          if (hasPendingUniform)
          {
            const Scalar a1 = pendingUniform.getA();
            const Scalar b1 = pendingUniform.getB();
            const Scalar alpha = a1 + a0;
            const Scalar delta = b1 + b0;
            const Scalar halfWidth = 0.5 * std::abs((b1 - a1) - (b0 - a0));
            const Scalar center = 0.5 * (alpha + delta);
            // A proper Trapezoidal
            if (halfWidth > 0.0) distributionCollection_.add(Trapezoidal(alpha, center - halfWidth, center + halfWidth, delta));
            // A degenerate Trapezoidal, i.e., a Triangular
            else distributionCollection_.add(Triangular(alpha, center, delta));
            // Add a unit weight as its initial weight has been merged into the parameters
            reducedWeights.add(Point(1, 1.0));
            hasPendingUniform = false;
          } // hasPendingUniform
          else
          {
            pendingUniform = Uniform(a0, b0);
            hasPendingUniform = true;
          } // !hasPendingUniform
        } // atom is a Uniform
        else if (atomKind == "Normal")
        {
          hasNormalAtom = true;
          aggregatedMean += w * atom.getMean()[0];
          aggregatedVariance += w * w * atom.getCovariance()(0, 0);
        } // atom is a Normal
        else if (atomKind == "Exponential")
        {
          const Point parameters(atom.getParameter());
          const Scalar key = parameters[0] / w;
          std::map<Scalar, Scalar>::iterator it(gammaMap.find(key));
          // New Gamma
          if (it == gammaMap.end()) gammaMap[key] = 1.0;
          // Already known Gamma, update the shape parameter
          else gammaMap[key] += 1.0;
          // In any case update the constant
          constant_ += Point(1, parameters[1]) * w;
        } // atom is Exponential
        else if (atomKind == "Gamma")
        {
          const Point parameters(atom.getParameter());
          const Scalar key = parameters[1] / w;
          std::map<Scalar, Scalar>::iterator it(gammaMap.find(key));
          // New Gamma
          if (it == gammaMap.end()) gammaMap[key] = parameters[0];
          // Already known Gamma, update the shape parameter
          else gammaMap[key] += parameters[0];
          // In any case update the constant
          constant_ += Point(1, parameters[2]) * w;
        } // atom is Gamma
        else if (atomKind == "ChiSquare")
        {
          const Point parameters(atom.getParameter());
          const Scalar key = 0.5 / w;
          std::map<Scalar, Scalar>::iterator it(gammaMap.find(key));
          // New Gamma
          if (it == gammaMap.end()) gammaMap[key] = 0.5 * parameters[0];
          // Already known Gamma, update the shape parameter
          else gammaMap[key] += 0.5 * parameters[0];
        } // atom is ChiSquare
        else
        {
          distributionCollection_.add(atom);
          reducedWeights.add(Point(1, w));
        } // no simplification known
      } // Loop over continuous atoms
      // Set the aggregated normal if any. Note that this atom absorbs the constant.
      if (hasNormalAtom)
      {
        if (hasPendingUniform)
        {
          distributionCollection_.add(SmoothedUniform(pendingUniform.getA() + aggregatedMean + constant_[0], pendingUniform.getB() + aggregatedMean + constant_[0], std::sqrt(aggregatedVariance)));
          constant_[0] = 0.0;
          // Add a unit weight as its initial weight has been merged into the parameters
          reducedWeights.add(Point(1, 1.0));
          // No more pending uniform
          hasPendingUniform = false;
        } // hasNormalAtom && hasPendingUniform
        else
        {
          distributionCollection_.add(Normal(aggregatedMean + constant_[0], std::sqrt(aggregatedVariance)));
          constant_[0] = 0.0;
          // Add a unit weight as its initial weight has been merged into the parameters
          reducedWeights.add(Point(1, 1.0));
        } // hasNormalAtom && !hasPendingUniform
      } // hasNormalAtom
      // Set the pending Uniform if any. Note that this atom absorbs the constant if not yet absorbed.
      if (hasPendingUniform)
      {
        if (constant_[0] != 0.0)
        {
          pendingUniform = Uniform(pendingUniform.getA() + constant_[0], pendingUniform.getB() + constant_[0]);
          constant_[0] = 0.0;
        }
        distributionCollection_.add(pendingUniform);
        // Add a unit weight as its initial weight has been merged into the parameters
        reducedWeights.add(Point(1, 1.0));
      } // hasPendingUniform
      // Add the aggregated Gamma atoms if any
      while (!gammaMap.empty())
      {
        const Scalar lambda = gammaMap.begin()->first;
        const Scalar k = gammaMap.begin()->second;
        if (k == 1.0) distributionCollection_.add(Exponential(std::abs(lambda)));
        else distributionCollection_.add(Gamma(k, std::abs(lambda)));
        reducedWeights.add(Point(1, lambda > 0.0 ? 1.0 : -1.0));
        gammaMap.erase(gammaMap.begin());
      } // while Gamma atoms to insert
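      // Worked example (illustrative) of the Gamma aggregation above: three
      // Exponential(lambda = 2) atoms, each with unit weight, share the key
      // lambda / w = 2, so their shape contributions accumulate to k = 3 and
      // the three atoms are replaced by the single atom Gamma(k = 3, lambda = 2)
      // carrying a unit weight.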
      // Remember the index of the first non-continuous atom in order to
      // know where the discrete atoms start in the collection
      const UnsignedInteger firstNonContinuousAtom = distributionCollection_.getSize();
      LOGDEBUG(OSS() << "After simplification of continuous atoms, distributionCollection_=" << distributionCollection_.__str__());
      // Optimization of discrete atoms
      // This map will store all the Poisson atoms
      // For each value of weight it stores the cumulated theta parameter
      std::map<Scalar, Scalar> poissonMap;
      // This map will store all the Bernoulli and Binomial atoms
      // For each value of (p, weight) it stores the cumulated n parameter
      std::map<Point, UnsignedInteger> binomialMap;
      for (UnsignedInteger i = 0; i < discreteAtoms.getSize(); ++i)
      {
        const Scalar w = discreteWeights(0, i);
        const Distribution atom(discreteAtoms[i]);
        const String atomKind(atom.getImplementation()->getClassName());
        if (atomKind == "Poisson")
        {
          const Point parameters(atom.getParameter());
          const Scalar key = w;
          std::map<Scalar, Scalar>::iterator it(poissonMap.find(key));
          // New Poisson
          if (it == poissonMap.end()) poissonMap[key] = parameters[0];
          // Already known Poisson, update the count parameter
          else poissonMap[key] += parameters[0];
        } // atom is Poisson
        else if (atomKind == "Bernoulli")
        {
          const Point parameters(atom.getParameter());
          Point key(2);
          key[0] = parameters[0];
          key[1] = w;
          std::map<Point, UnsignedInteger>::iterator it(binomialMap.find(key));
          // New Binomial
          if (it == binomialMap.end()) binomialMap[key] = 1;
          // Already known Binomial, update the count parameter
          else binomialMap[key] += 1;
        } // atom is Bernoulli
        else if (atomKind == "Binomial")
        {
          const Point parameters(atom.getParameter());
          Point key(2);
          key[0] = parameters[1];
          key[1] = w;
          std::map<Point, UnsignedInteger>::iterator it(binomialMap.find(key));
          // New Binomial
          if (it == binomialMap.end()) binomialMap[key] = static_cast<UnsignedInteger>(parameters[0]);
          // Already known Binomial, update the count parameter
          else binomialMap[key] += static_cast<UnsignedInteger>(parameters[0]);
        } // atom is Binomial
        else
        {
          distributionCollection_.add(atom);
          reducedWeights.add(Point(1, w));
        }
      } // discreteAtoms
      // Add the aggregated Poisson atoms if any
      while (!poissonMap.empty())
      {
        const Scalar w = poissonMap.begin()->first;
        const Scalar theta = poissonMap.begin()->second;
        distributionCollection_.add(Poisson(theta));
        reducedWeights.add(Point(1, w));
        poissonMap.erase(poissonMap.begin());
      } // while Poisson atoms to insert
      // Add the aggregated Binomial atoms if any
      while (!binomialMap.empty())
      {
        const Scalar p = binomialMap.begin()->first[0];
        const Scalar w = binomialMap.begin()->first[1];
        const UnsignedInteger n = binomialMap.begin()->second;
        if (n == 1) distributionCollection_.add(Bernoulli(p));
        else distributionCollection_.add(Binomial(n, p));
        reducedWeights.add(Point(1, w));
        binomialMap.erase(binomialMap.begin());
      } // while Binomial atoms to insert
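      // Worked example (illustrative) of the Binomial aggregation above: four
      // Bernoulli(p = 0.3) atoms sharing the same weight w accumulate n = 4
      // under the key (0.3, w) in binomialMap, and are replaced by the single
      // atom Binomial(n = 4, p = 0.3) with weight w.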
      LOGDEBUG(OSS() << "After simplification of discrete atoms, distributionCollection_=" << distributionCollection_.__str__());
      // Now merge the discrete atoms by groups of reasonably sized support
      // if there are at least 2 discrete atoms
      const UnsignedInteger maxSupportSize(ResourceMap::GetAsUnsignedInteger("RandomMixture-MaximumSupportSize"));
      UnsignedInteger firstOtherAtom = distributionCollection_.getSize();
      // No aggregation if maxSupportSize==0 or if there is only one discrete atom
      if (firstOtherAtom > firstNonContinuousAtom + 1 && maxSupportSize > 0)
      {

        UnsignedInteger indexAggregated = firstNonContinuousAtom;
        UnsignedInteger firstDiscreteIndex = firstNonContinuousAtom;
        Distribution firstDiscrete(distributionCollection_[firstDiscreteIndex]);
        Sample aggregatedSupport(firstDiscrete.getSupport() * reducedWeights[firstDiscreteIndex]);
        Point aggregatedProbabilities(firstDiscrete.getProbabilities());
        UnsignedInteger aggregatedSupportSize = aggregatedSupport.getSize();
        for (UnsignedInteger secondDiscreteIndex = firstNonContinuousAtom + 1; secondDiscreteIndex < firstOtherAtom; ++secondDiscreteIndex)
        {
          const Distribution secondDiscrete(distributionCollection_[secondDiscreteIndex]);
          const Sample secondSupport(secondDiscrete.getSupport() * reducedWeights[secondDiscreteIndex]);
          const Point secondProbabilities(secondDiscrete.getProbabilities());
          const UnsignedInteger secondSupportSize = secondSupport.getSize();
          const UnsignedInteger newAggregatedSupportSize = aggregatedSupportSize * secondSupportSize;
          // If the next discrete may lead to a too large support, store the current aggregated discrete atom and go to the next group
          if (newAggregatedSupportSize > maxSupportSize)
          {
            // If several discrete atoms have been merged, store the aggregated
            // atom at the place occupied by the first discrete atom
            if (secondDiscreteIndex > firstDiscreteIndex + 1)
            {
              distributionCollection_[indexAggregated] = UserDefined(aggregatedSupport, aggregatedProbabilities);
              reducedWeights[indexAggregated] = Point(1, 1.0);
            }
            else
              distributionCollection_[indexAggregated] = firstDiscrete;
            ++indexAggregated;
            firstDiscreteIndex = secondDiscreteIndex;
            firstDiscrete = secondDiscrete;
            aggregatedSupport = secondSupport;
            aggregatedProbabilities = secondProbabilities;
            aggregatedSupportSize = secondSupportSize;
          } // If the aggregated discrete atom is large enough
          else
          {
            Sample newAggregatedSupportAndProbabilities(newAggregatedSupportSize, 2);
            UnsignedInteger k = 0;
            for (UnsignedInteger firstIndex = 0; firstIndex < aggregatedSupportSize; ++firstIndex)
            {
              const Scalar xI = aggregatedSupport(firstIndex, 0);
              const Scalar pI = aggregatedProbabilities[firstIndex];
              for (UnsignedInteger secondIndex = 0; secondIndex < secondSupportSize; ++secondIndex)
              {
                const Scalar xJ = secondSupport(secondIndex, 0);
                const Scalar pJ = secondProbabilities[secondIndex];
                newAggregatedSupportAndProbabilities(k, 0) = xI + xJ;
                newAggregatedSupportAndProbabilities(k, 1) = pI * pJ;
                ++k;
              } // secondIndex
            } // firstIndex
            // Merge the identical points in the support
            // First, sort the new aggregated data according to the support points
            newAggregatedSupportAndProbabilities = newAggregatedSupportAndProbabilities.sortAccordingToAComponent(0);
            // Second, filter out the duplicates in the point space and aggregate the values in the probability space
            aggregatedSupport = Sample(1, Point(1, newAggregatedSupportAndProbabilities(0, 0)));
            aggregatedProbabilities = Point(1, newAggregatedSupportAndProbabilities(0, 1));
            k = 0;
            for (UnsignedInteger index = 1; index < newAggregatedSupportSize; ++index)
            {
              // If the current point is equal to the last one, aggregate the probabilities
              if (newAggregatedSupportAndProbabilities(index, 0) == aggregatedSupport(k, 0))
              {
                aggregatedProbabilities[k] += newAggregatedSupportAndProbabilities(index, 1);
              } // current point equal to the previous one
              else
              {
                ++k;
                aggregatedSupport.add(Point(1, newAggregatedSupportAndProbabilities(index, 0)));
                aggregatedProbabilities.add(newAggregatedSupportAndProbabilities(index, 1));
              } // current point different from the previous one
            } // Loop over the new aggregated support
            aggregatedSupportSize = aggregatedSupport.getSize();
          } // Merge the second atom into the aggregated atom
        } // Loop over the discrete atoms
        // If there is still something to merge
        // It can be:
        // + an aggregated atom with a small support (detected because firstDiscreteIndex < firstOtherAtom - 1)
        // + a single atom (the second one, but now equal to the first one) (detected because firstDiscreteIndex == firstOtherAtom - 1)
        if (firstDiscreteIndex == firstOtherAtom - 1)
          distributionCollection_[indexAggregated] = firstDiscrete;
        else
        {
          distributionCollection_[indexAggregated] = UserDefined(aggregatedSupport, aggregatedProbabilities);
          reducedWeights[indexAggregated] = Point(1, 1.0);
        }
        // To identify the first discrete atom to remove
        ++indexAggregated;
        // Now remove the discrete atoms that have been merged from the list of distributions
        distributionCollection_.erase(distributionCollection_.begin() + indexAggregated, distributionCollection_.end());
        reducedWeights.erase(indexAggregated, reducedWeights.getSize());
        firstOtherAtom = distributionCollection_.getSize();
      } // If there are discrete atoms to merge
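      // Worked example (illustrative) of the merge above: two discrete atoms
      // uniform over {1, ..., 6} (two dice) with unit weights lead to a product
      // support of 36 sums which, once sorted and deduplicated, collapses to
      // the 11 points {2, ..., 12} of a single UserDefined atom, assuming
      // maxSupportSize >= 36.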

      // Then perform the continuous/discrete simplification using mixtures.
      // It applies only if there are both continuous atoms and discrete ones.
      if (firstNonContinuousAtom > 0 && firstNonContinuousAtom != firstOtherAtom)
      {
        const SignedInteger firstContinuous = 0;
        const SignedInteger firstDiscrete = firstNonContinuousAtom;
        SignedInteger currentContinuous = firstNonContinuousAtom - 1;
        SignedInteger currentDiscrete = firstOtherAtom - 1;
        while (currentContinuous >= firstContinuous && currentDiscrete >= firstDiscrete)
        {
          const Distribution continuousAtom(distributionCollection_[currentContinuous]);
          const Scalar continuousWeight = reducedWeights(currentContinuous, 0);
          Distribution discreteAtom(distributionCollection_[currentDiscrete]);
          Scalar discreteWeight = reducedWeights(currentDiscrete, 0);
          const Sample support(discreteAtom.getSupport());
          DistributionCollection mixtureAtoms;
          for (UnsignedInteger i = 0; i < support.getSize(); ++i)
            mixtureAtoms.add(RandomMixture(DistributionCollection(1, continuousAtom), Point(1, continuousWeight), support(i, 0) * discreteWeight));
          const Point probabilities(discreteAtom.getProbabilities());
          // Replace the current continuous atom by the Mixture
          distributionCollection_[currentContinuous] = Mixture(mixtureAtoms, probabilities);
          // Remove the current discrete atom
          distributionCollection_.erase(distributionCollection_.begin() + currentDiscrete);
          reducedWeights.erase(currentDiscrete);
          --currentContinuous;
          --currentDiscrete;
        } // loop over (continuous, discrete) pairs
      } // continuous and discrete atoms to merge?
      // No simplification for other atoms
      distributionCollection_.add(otherAtoms);
      reducedWeights.add(otherWeights);
    } // dimension == 1
    else
    {
      distributionCollection_.add(continuousAtoms);
      reducedWeights.add(continuousWeights);
      distributionCollection_.add(discreteAtoms);
      reducedWeights.add(discreteWeights);
      distributionCollection_.add(otherAtoms);
      reducedWeights.add(otherWeights);
    } // dimension > 1
    // Store the weights in a Matrix format
    weights_ = Matrix(reducedWeights.getDimension(), reducedWeights.getSize(), reducedWeights.getImplementation()->getData());
  } // simplifyAtoms == true
  else
  {
    distributionCollection_ = atomCandidates;
    // Store the weights in a Matrix format
    weights_ = Matrix(weightCandidates.getDimension(), weightCandidates.getSize(), weightCandidates.getImplementation()->getData());
  } // simplifyAtoms == false
  // Special case: distributionCollection_ is empty because all the atoms were
  // Dirac distributions, so they have all been merged into the constant. As we
  // need at least one atom for the algorithms to work, we convert the constant
  // back into a unique Dirac distribution. This case can occur only in dimension 1.
  if (distributionCollection_.getSize() == 0)
  {
    distributionCollection_.add(Dirac(constant_));
    weights_ = Matrix(1, 1);
    weights_(0, 0) = 1.0;
    constant_[0] = 0.0;
  }

  // We cannot use parallelism if we have more than two atoms, due to the characteristic function cache
  if (distributionCollection_.getSize() == 1) setParallel(distributionCollection_[0].getImplementation()->isParallel());
  else if (distributionCollection_.getSize() == 2) setParallel(distributionCollection_[0].getImplementation()->isParallel() && distributionCollection_[1].getImplementation()->isParallel());
  else setParallel(false);
  isAlreadyComputedMean_ = false;
  isAlreadyComputedCovariance_ = false;
  // Need to precompute Mean, Covariance, PositionIndicator, DispersionIndicator, ReferenceBandwidth, EquivalentNormal only if there are at least two atoms
  // Compute the range first, as it is needed for the reference bandwidth
  computeRange();
  if ((dimension > 1) || (distributionCollection_.getSize() > 1))
  {
    computeMean();
    computeCovariance();
    (void) getPositionIndicator();
    (void) getDispersionIndicator();
    computeReferenceBandwidth();
    computeEquivalentNormal();
  }
  // If the collection has been reduced to 'dimension' atoms, the distribution
  // becomes analytical and the weight matrix can be inverted
  if (distributionCollection_.getSize() == dimension)
  {
    inverseWeights_ = weights_.solveLinearSystem(IdentityMatrix(dimension));
    isAnalytical_ = true;
    detWeightsInverse_ = inverseWeights_.getImplementation().get()->computeDeterminant();
  }
}

/* Constant accessor */
void RandomMixture::setConstant(const Point & constant)
{
  if (constant != constant_)
  {
    if (constant.getSize() != getDimension()) throw InvalidArgumentException(HERE) << "Error: the constant term must have the same dimension as the distribution";
    constant_ = constant;
    isAlreadyComputedMean_ = false;
    // The covariance does not depend on the constant
    computeRange();
  }
}

Point RandomMixture::getConstant() const
{
  return constant_;
}

/* Distribution collection accessor */
DistributionCollection RandomMixture::getDistributionCollection() const
{
  return distributionCollection_;
}

/* FFT algorithm accessor */
FFT RandomMixture::getFFTAlgorithm() const
{
  return fftAlgorithm_;
}

/* FFT algorithm accessor */
void RandomMixture::setFFTAlgorithm(const FFT & fft)
{
  fftAlgorithm_ = fft;
}


/* Virtual constructor */
RandomMixture * RandomMixture::clone() const
{
  return new RandomMixture(*this);
}

/* Get one realization of the RandomMixture */
Point RandomMixture::getRealization() const
{
  const UnsignedInteger size = distributionCollection_.getSize();
  Point realization(size);
  for (UnsignedInteger i = 0; i < size; ++i) realization[i] = distributionCollection_[i].getRealization()[0];
  return weights_ * realization + constant_;
}

/* Get a Sample of the RandomMixture */
Sample RandomMixture::getSample(const UnsignedInteger size) const
{
  const UnsignedInteger atomSize = distributionCollection_.getSize();
  Sample sample(atomSize, size);
  for (UnsignedInteger i = 0; i < atomSize; ++i)
  {
    sample[i] = distributionCollection_[i].getSample(size).asPoint();
  }
  return weights_.getImplementation()->genSampleProd(sample, true, true, 'R') + constant_;
}

Sample RandomMixture::getSampleByQMC(const UnsignedInteger size) const
{
  const UnsignedInteger atomSize = distributionCollection_.getSize();
  Sample sample(atomSize, size);
  const Point u(SobolSequence(1).generate(size).getImplementation()->getData());
  for (UnsignedInteger i = 0; i < atomSize; ++i)
  {
    sample[i] = distributionCollection_[i].computeQuantile(u).asPoint();
  }
  return weights_.getImplementation()->genSampleProd(sample, true, true, 'R') + constant_;
}
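
/* Illustrative sketch (not part of the library build): both sampling methods
   return weights_ * X + constant_ computed sample-wise; the QMC variant fills
   each atom by inverting its CDF on the same 1D Sobol point set. Typical use:

     const Sample mcSample(mixture.getSample(1000));       // pseudo-random
     const Sample qmcSample(mixture.getSampleByQMC(1000));  // low-discrepancy
*/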

/* Get the DDF of the RandomMixture */
Point RandomMixture::computeDDF(const Point & point) const
{
  return DistributionImplementation::computeDDF(point);
}

/* Integration kernels for the convolution in the 1D case with 2 continuous atoms */
namespace
{
// Class used to wrap the kernel of the integral defining the PDF of the convolution
class PDFKernelRandomMixture: public UniVariateFunctionImplementation
{
public:
  PDFKernelRandomMixture(const Scalar alpha1,
                         const Scalar alpha2,
                         const Pointer<DistributionImplementation> & p_atom1,
                         const Pointer<DistributionImplementation> & p_atom2,
                         const Scalar z0)
    : UniVariateFunctionImplementation()
    , alpha1_(alpha1)
    , alpha2_(alpha2)
    , p_atom1_(p_atom1)
    , p_atom2_(p_atom2)
    , z0_(z0)
  {
    // Nothing to do
  }

  PDFKernelRandomMixture * clone() const
  {
    return new PDFKernelRandomMixture(*this);
  }

  // Z = alpha0 + alpha1 X1 + alpha2 X2
  Scalar operator() (const Scalar u) const
  {
    const Scalar pdf = p_atom1_->computePDF(u);
    if (pdf == 0.0) return 0.0;
    return pdf * p_atom2_->computePDF((z0_ - alpha1_ * u) / alpha2_);
  }

private:
  const Scalar alpha1_;
  const Scalar alpha2_;
  const Pointer<DistributionImplementation> p_atom1_;
  const Pointer<DistributionImplementation> p_atom2_;
  const Scalar z0_;

}; // class PDFKernelRandomMixture
1145 
1146 // Class used to wrap the kernel of the integral defining the CDF of the convolution
1147 class CDFKernelRandomMixture: public UniVariateFunctionImplementation
1148 {
1149 public:
CDFKernelRandomMixture(const Scalar alpha1,const Scalar alpha2,const Pointer<DistributionImplementation> & p_atom1,const Pointer<DistributionImplementation> & p_atom2,const Scalar z0)1150   CDFKernelRandomMixture(const Scalar alpha1,
1151                          const Scalar alpha2,
1152                          const Pointer<DistributionImplementation> & p_atom1,
1153                          const Pointer<DistributionImplementation> & p_atom2,
1154                          const Scalar z0)
1155     : UniVariateFunctionImplementation()
1156     , alpha1_(alpha1)
1157     , alpha2_(alpha2)
1158     , p_atom1_(p_atom1)
1159     , p_atom2_(p_atom2)
1160     , z0_(z0)
1161   {
1162     // Nothing to do
1163   };
1164 
clone() const1165   CDFKernelRandomMixture * clone() const
1166   {
1167     return new CDFKernelRandomMixture(*this);
1168   }
1169 
1170   // Z = alpha0 + alpha1 X1 + alpha2 X2
operator ()(const Scalar u) const1171   Scalar operator() (const Scalar u) const
1172   {
1173     const Scalar pdf = p_atom1_->computePDF(u);
1174     if (pdf == 0.0) return 0.0;
1175     return pdf * p_atom2_->computeCDF((z0_ - alpha1_ * u) / alpha2_);
1176   }
1177 
1178 private:
1179   const Scalar alpha1_;
1180   const Scalar alpha2_;
1181   const Pointer<DistributionImplementation> p_atom1_;
1182   const Pointer<DistributionImplementation> p_atom2_;
1183   const Scalar z0_;
1184 
1185 }; // class CDFKernelRandomMixture
1186 
1187 // Class used to wrap the kernel of the integral defining the complementary CDF of the convolution
1188 class ComplementaryCDFKernelRandomMixture: public UniVariateFunctionImplementation
1189 {
1190 public:
ComplementaryCDFKernelRandomMixture(const Scalar alpha1,const Scalar alpha2,const Pointer<DistributionImplementation> & p_atom1,const Pointer<DistributionImplementation> & p_atom2,const Scalar z0)1191   ComplementaryCDFKernelRandomMixture(const Scalar alpha1,
1192                                       const Scalar alpha2,
1193                                       const Pointer<DistributionImplementation> & p_atom1,
1194                                       const Pointer<DistributionImplementation> & p_atom2,
1195                                       const Scalar z0)
1196     : UniVariateFunctionImplementation()
1197     , alpha1_(alpha1)
1198     , alpha2_(alpha2)
1199     , p_atom1_(p_atom1)
1200     , p_atom2_(p_atom2)
1201     , z0_(z0)
1202   {
1203     // Nothing to do
1204   };
1205 
clone() const1206   ComplementaryCDFKernelRandomMixture * clone() const
1207   {
1208     return new ComplementaryCDFKernelRandomMixture(*this);
1209   }
1210 
1211   // Z = alpha0 + alpha1 X1 + alpha2 X2
operator ()(const Scalar u) const1212   Scalar operator() (const Scalar u) const
1213   {
1214     const Scalar pdf = p_atom1_->computePDF(u);
1215     if (pdf == 0.0) return 0.0;
1216     return pdf * p_atom2_->computeComplementaryCDF((z0_ - alpha1_ * u) / alpha2_);
1217   }
1218 
1219 private:
1220   const Scalar alpha1_;
1221   const Scalar alpha2_;
1222   const Pointer<DistributionImplementation> p_atom1_;
1223   const Pointer<DistributionImplementation> p_atom2_;
1224   const Scalar z0_;
1225 
1226 }; // class ComplementaryCDFKernelRandomMixture
1227 } // namespace
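
/* The three kernels above are the integrands of the convolution formulas used
   below for Z = alpha0 + alpha1 X1 + alpha2 X2, with z0 = z - alpha0:
   p_Z(z) = 1/|alpha2| \int p_X1(u) p_X2((z0 - alpha1 u) / alpha2) du
   F_Z(z) = \int p_X1(u) F_X2((z0 - alpha1 u) / alpha2) du      if alpha2 > 0
   F_Z(z) = \int p_X1(u) Fbar_X2((z0 - alpha1 u) / alpha2) du   if alpha2 < 0
   The 1/|alpha2| factor of the PDF formula is applied by the caller. */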

/* Get the PDF of the RandomMixture. It uses the Poisson inversion formula as described in the reference:
   "Abate, J. and Whitt, W. (1992). The Fourier-series method for inverting
   transforms of probability distributions. Queueing Systems 10, 5--88.",
   formula 5.5.
   We use an incremental update of the trigonometric functions and reduce the complex arithmetic to a real
   arithmetic for performance purposes.

   Here, we recall the Poisson summation formula:
   \sum_{k\in Z}p(x+2k\pi/h) = h/2\pi\sum_{j\in Z}\phi(jh)\exp(-Ihjx)
   We can rewrite this formula as:
   \sum_{k\in Z}p(x+2k\pi/h) = h/2\pi\sum_{j\in Z}\left[\phi(jh) - \psi(jh)\right]\exp(-Ihjx) + \sum_{k\in Z}q(x+2k\pi/h),
   where q is the PDF and \psi the characteristic function of the normal distribution with the same mean and
   the same variance as the mixture. Take h such that p(x+2k\pi/h) << p(x) for k\neq 0, then:
   p(x) \simeq h/2\pi\sum_{j\in Z}\left[\phi(jh) - \psi(jh)\right]\exp(-Ihjx) + \sum_{k\in Z}q(x+2k\pi/h).
   The second sum \sum_{k\in Z}q(x+2k\pi/h) is approximated using only a few terms, as the condition on h almost
   gives q(x+2k\pi/h) << q(x) for k\neq 0. Call this sum Q(x, h), and define \delta as \delta(t) = \phi(t) - \psi(t).
   We unroll the complex arithmetic for performance purposes:
   p(x) \simeq h/2\pi\sum_{j\neq 0}\delta(jh)\exp(-Ihjx) + Q(x, h) as \delta(0) = 0
        \simeq h/\pi\sum_{j>0} Re(\delta(jh)) * cos(jhx) + Im(\delta(jh)) * sin(jhx) + Q(x, h)
*/
Scalar RandomMixture::computePDF(const Point & point) const
{
  if (point.getDimension() != dimension_)
    throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << dimension_ << ", here dimension=" << point.getDimension();

  if (isAnalytical_)
  {
    // Compute the PDF analytically
    const Point u(point - constant_);
    const Point Qu(inverseWeights_ * u);
    // The scaling factor is 1 for a discrete PDF and the inverse of the Jacobian for a continuous PDF
    Scalar value = (isDiscrete() ? 1.0 : std::abs(detWeightsInverse_));
    for (UnsignedInteger j = 0; j < dimension_; ++j) value *= distributionCollection_[j].computePDF(Qu[j]);
    return value;
  } // isAnalytical_

  // Check the range
  // We check that the point is strictly inside the range:
  // at the bounds, the returned value is 0.0
  const Interval range(getRange());
  const Point lowerBound(range.getLowerBound());
  const Point upperBound(range.getUpperBound());
  for (UnsignedInteger j = 0; j < dimension_; ++j)
    if ((point[j] <= lowerBound[j]) || (point[j] >= upperBound[j])) return 0.0;
  // Check for exotic situations not yet implemented by the class
  if (!isContinuous() && distributionCollection_.getSize() > 1) throw NotYetImplementedException(HERE) << "Error: no algorithm is currently available for the non-continuous case with more than one atom.";
  // Special case for 1D distributions with exactly 2 continuous atoms
  if ((dimension_ == 1) && (distributionCollection_.getSize() == 2) && distributionCollection_[0].isContinuous() && distributionCollection_[1].isContinuous())
  {
    // Get the parameters of the random mixture
    const Scalar z0 = point[0] - constant_[0];
    const Scalar alpha1 = weights_(0, 0);
    const Scalar alpha2 = weights_(0, 1);
    // Get the bounds of the atoms
    const Scalar a = distributionCollection_[0].getRange().getLowerBound()[0];
    const Scalar b = distributionCollection_[0].getRange().getUpperBound()[0];
    const Scalar c = distributionCollection_[1].getRange().getLowerBound()[0];
    const Scalar d = distributionCollection_[1].getRange().getUpperBound()[0];
    // Compute the bounds of the convolution
    Scalar lower = -1.0;
    Scalar upper = -1.0;
    Scalar uc = (z0 - alpha2 * c) / alpha1;
    Scalar ud = (z0 - alpha2 * d) / alpha1;
    if ((alpha1 > 0) == (alpha2 > 0))
    {
      lower = std::max(a, ud);
      upper = std::min(b, uc);
    }
    else
    {
      lower = std::max(a, uc);
      upper = std::min(b, ud);
    }
    const PDFKernelRandomMixture convolutionKernel(alpha1, alpha2, distributionCollection_[0].getImplementation(), distributionCollection_[1].getImplementation(), z0);
    return algo_.integrate(convolutionKernel, lower, upper) / std::abs(alpha2);
  }

  LOGDEBUG(OSS() << "Equivalent normal=" << equivalentNormal_);
  // We unroll the complex arithmetic and perform incremental updates to improve performance
  Point two_pi_on_h(dimension_);
  for (UnsignedInteger k = 0; k < dimension_; ++k) two_pi_on_h[k] = 2.0 * M_PI / referenceBandwidth_[k];
  UnsignedInteger levelMax = 0;
  Scalar value = computeEquivalentNormalPDFSum(point, two_pi_on_h, 0, levelMax);

  UnsignedInteger k = 1;
  const Scalar precision = pdfPrecision_;
  const UnsignedInteger kmin = 1 << blockMin_;
  const UnsignedInteger kmax = 1 << blockMax_;
  // hX is only useful in 1D
  Scalar hX = referenceBandwidth_[0] * point[0];
  Scalar error = 2.0 * precision;
  LOGDEBUG(OSS() << std::setprecision(20) << "h=" << referenceBandwidth_ << ", equivalent normal pdf sum=" << value << ", k=" << k << ", precision=" << precision << ", kmin=" << kmin << ", kmax=" << kmax << ", error=" << error);
  while ( (k < kmin) || ( (k < kmax) && (error > precision) ) )
  {
    Scalar sumContributions = 0.0;
    error = 0.0;
    for (UnsignedInteger m = k; m < 2 * k; ++m)
    {
      if (dimension_ == 1)
      {
        const Scalar sinMHX = std::sin(m * hX);
        const Scalar cosMHX = std::cos(m * hX);
        const Complex deltaValue(computeDeltaCharacteristicFunction(m));
        const Scalar contribution = (deltaValue.real() * cosMHX + deltaValue.imag() * sinMHX);
        LOGDEBUG(OSS() << "m=" << m << ", delta=" << deltaValue << ", contribution=" << contribution);
        sumContributions += contribution;
        error += std::abs(contribution);
      } // dimension_ == 1
      else
      {
        Sample skinPoints(gridMesher_.getPoints(m));
        const UnsignedInteger fromIndex = gridMesher_.getOffsetLevel(m);
        const UnsignedInteger lastIndex = gridMesher_.getOffsetLevel(m + 1) - 1;
        if (lastIndex <= maxSize_)
        {
          if (lastIndex > storedSize_)
            updateCacheDeltaCharacteristicFunction(skinPoints);
          // The level is now entirely in the cache
          for (UnsignedInteger i = 0; i < skinPoints.getSize(); ++i)
          {
            const Complex deltaValue(characteristicValuesCache_[fromIndex + i - 1]);
            hX = 0.0;
            for (UnsignedInteger j = 0; j < dimension_; ++j) hX += skinPoints(i, j) * point[j];
            const Scalar sinHX = std::sin(hX);
            const Scalar cosHX = std::cos(hX);
            const Scalar contribution = deltaValue.real() * cosHX + deltaValue.imag() * sinHX;
            error += std::abs(contribution);
            sumContributions += contribution;
            LOGDEBUG(OSS() << "m=" << m << ", delta=" << deltaValue << ", contribution=" << contribution << ", error=" << error);
          } // skinPoints
        } // lastIndex <= maxSize_
        else
        {
          Point pti(dimension_);
          for (UnsignedInteger i = 0; i < skinPoints.getSize(); ++i)
          {
            hX = 0.0;
            for (UnsignedInteger j = 0; j < dimension_; ++j)
            {
              pti[j] = skinPoints(i, j);
              hX += skinPoints(i, j) * point[j];
            }
            const Complex deltaValue(computeDeltaCharacteristicFunction(pti));
            const Scalar sinHX = std::sin(hX);
            const Scalar cosHX = std::cos(hX);
            const Scalar contribution = deltaValue.real() * cosHX + deltaValue.imag() * sinHX;
            error += std::abs(contribution);
            sumContributions += contribution;
            LOGDEBUG(OSS() << "m=" << m << ", delta=" << deltaValue << ", contribution=" << contribution << ", error=" << error);
          } // skinPoints
        } // lastIndex > maxSize_
      } // dimension > 1
    }
    error *= referenceBandwidthFactor_;
    sumContributions *= referenceBandwidthFactor_;
    if (gridMesher_.isSymmetric())
    {
      error *= 2.0;
      sumContributions *= 2.0;
    }
    value += sumContributions;
    k *= 2;
  } // while
  // For very low levels of PDF, the computed value can be slightly negative. Round it up to zero.
  if (value < 0.0) value = 0.0;
  return value;
}
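
/* Illustrative usage sketch (comment only, not part of the build): with exactly
   two continuous atoms the direct convolution branch above is taken; with three
   or more atoms the Poisson-formula loop controlled by blockMin_/blockMax_ and
   pdfPrecision_ is used. The atoms below are assumptions chosen for the example.

   #include "openturns/OT.hxx"
   using namespace OT;

   int main()
   {
     Collection<Distribution> atoms;
     atoms.add(Uniform(0.0, 1.0));
     atoms.add(Normal(0.0, 1.0));
     atoms.add(Exponential(1.0));
     const RandomMixture mixture(atoms);                   // Z = X1 + X2 + X3
     const Scalar pdf = mixture.computePDF(Point(1, 1.5)); // Poisson-formula branch
     return pdf >= 0.0 ? 0 : 1;
   }
*/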

/* Compute the PDF of 1D distributions over a regular grid. The precision is reduced as this method is for drawing purposes only. */
Sample RandomMixture::computePDF(const Scalar xMin,
                                 const Scalar xMax,
                                 const UnsignedInteger pointNumber,
                                 Sample & grid) const
{
  if (getDimension() != 1) throw InvalidDimensionException(HERE) << "Error: this method is available only for 1D distributions";
  return computePDF(Point(1, xMin), Point(1, xMax), Indices(1, pointNumber), grid);
}

struct EquivalentNormalPDFSumPolicy
{
  const RandomMixture & mixture_;
  const Sample & grid_;
  const Point & two_b_sigma_;
  const UnsignedInteger levelMax_;
  Collection<Scalar> & output_;

  EquivalentNormalPDFSumPolicy(const RandomMixture & mixture,
                               const Sample & grid,
                               const Point & two_b_sigma,
                               const UnsignedInteger levelMax,
                               Collection<Scalar> & output)
    : mixture_(mixture)
    , grid_(grid)
    , two_b_sigma_(two_b_sigma)
    , levelMax_(levelMax)
    , output_(output)
  {}

  inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
  {
    UnsignedInteger fakeLevelMax = 0;
    for (UnsignedInteger i = r.begin(); i != r.end(); ++i)
    {
      output_[i] = mixture_.computeEquivalentNormalPDFSum(grid_[i], two_b_sigma_, levelMax_, fakeLevelMax);
    }
  }
}; /* end struct EquivalentNormalPDFSumPolicy */

/* Compute the PDF of nD distributions over a regular grid */
Sample RandomMixture::computePDF(const Point & xMin,
                                 const Point & xMax,
                                 const Indices & pointNumber,
                                 Sample & grid) const
{
  if (xMin.getDimension() != xMax.getDimension()) throw InvalidArgumentException(HERE) << "Error: the two corner points must have the same dimension. Here, dim(xMin)=" << xMin.getDimension() << " and dim(xMax)=" << xMax.getDimension();
  if (xMin.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the corner points must have the same dimension as the distribution. Here, dim(xMin)=" << xMin.getDimension() << " and distribution dimension=" << dimension_;
  if (dimension_ != pointNumber.getSize()) throw InvalidArgumentException(HERE) << "Error: the discretization must match the distribution dimension. Here, dim(discretization)=" << pointNumber.getSize() << " and distribution dimension=" << dimension_;
  if (xMin == xMax) throw InvalidArgumentException(HERE) << "Error: xMin and xMax must be different to define a grid";
  if (isAnalytical_ && (dimension_ == 1)) return DistributionImplementation::computePDF(xMin, xMax, pointNumber, grid);
  IndicesCollection indices(Tuples(pointNumber).generate());

  if (dimension_ < 1 || dimension_ > 3) throw InvalidArgumentException(HERE) << "Error: dimension must be 1, 2 or 3; here dimension=" << dimension_;
  // Special case for 1D distributions with exactly 2 atoms
  if ((dimension_ == 1) && (distributionCollection_.getSize() == 2))
  {
    const Scalar a = xMin[0];
    const Scalar b = xMax[0];
    const UnsignedInteger n = pointNumber[0];
    grid = Sample(n, 1);
    Sample pdf(n, 1);
    for (UnsignedInteger i = 0; i < n; ++i)
    {
      const Scalar x = a + i * (b - a) / (n - 1);
      grid(i, 0) = x;
      pdf(i, 0) = computePDF(x);
    }
    return pdf;
  } // dimension == 1 && size == 2
  const Point mu(getMean());
  const Interval bounds(xMin, xMax);
  //if (!bounds.contains(mu)) throw InvalidArgumentException(HERE) << "Error: requested interval does not contain mean=" << mu;

  const Point sigma(getStandardDeviation());
  UnsignedInteger b = 0;
  for(UnsignedInteger i = 0; i < dimension_; ++i)
  {
    const Scalar dx = std::max(mu[i] - xMin[i], xMax[i] - mu[i]);
    b = std::max(b, static_cast<UnsignedInteger>(std::ceil(dx / sigma[i])));
  }
  const Point b_sigma(b * sigma);
  const Point two_b_sigma(2.0 * b_sigma);

  Point h(dimension_);
  Point tau(dimension_);
  for(UnsignedInteger i = 0; i < dimension_; ++i)
  {
    h[i] = M_PI / b_sigma[i];
    tau[i] = mu[i] / b_sigma[i];
  }
  const UnsignedInteger size = indices.getSize();
  grid = Sample(indices.getSize(), dimension_);
  for (UnsignedInteger i = 0; i < size; ++i)
    for (UnsignedInteger j = 0; j < dimension_; ++j)
      grid(i, j) = mu[j] + ((2.0 * indices(i, j) + 1.0) / pointNumber[j] - 1.0) * b_sigma[j];

  LOGWARN(OSS() << "Warning! Grid is modified: xMin=" << grid[0] << " xMax=" << grid[size - 1] << " instead of xMin=" << xMin << ", xMax=" << xMax);

  Sample result(size, 1);
  // Special case when the distribution is analytical
  if (isAnalytical_) return computePDF(grid);
  UnsignedInteger levelMax = 0;
  // Compute the Gaussian sum PDF
  // First compute levelMax on mu, to speed up calls to computeEquivalentNormalPDFSum
  (void) computeEquivalentNormalPDFSum(mu, two_b_sigma, 0, levelMax);

  Collection<Scalar> output(size);
  const EquivalentNormalPDFSumPolicy policyGrid(*this, grid, two_b_sigma, levelMax, output);
  TBB::ParallelFor( 0, size, policyGrid);

  result.getImplementation()->setData(output);

  // The methods below will call computeDeltaCharacteristicFunction() on different threads
  // if using TBB, which in turn calls equivalentNormal_.computeCharacteristicFunction()
  // and then equivalentNormal_.getCovariance(). But the covariance is lazily evaluated.
  // We must ensure that it is computed before entering the TBB multithreaded section.
  (void) equivalentNormal_.getCovariance();

  switch(dimension_)
  {
    case 1:
      addPDFOn1DGrid(pointNumber, h, tau, result);
      break;
    case 2:
      addPDFOn2DGrid(pointNumber, h, tau, result);
      break;
    case 3:
      addPDFOn3DGrid(pointNumber, h, tau, result);
      break;
  }
  for (UnsignedInteger j = 0; j < size; ++j)
  {
    result(j, 0) = std::max(0.0, result(j, 0));
  }
  return result;
}
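
/* Illustrative usage sketch (comment only, not part of the build): evaluating
   the PDF on a regular 2D grid. As the LOGWARN above indicates, the grid
   actually used is recentered on the mean, so the abscissas should be read back
   from the `grid` output argument rather than from the requested corners.
   The atoms and weights below are assumptions chosen for the example.

   #include "openturns/OT.hxx"
   using namespace OT;

   int main()
   {
     Collection<Distribution> atoms;
     atoms.add(Normal(0.0, 1.0));
     atoms.add(Uniform(0.0, 1.0));
     atoms.add(Exponential(1.0));
     Matrix weights(2, 3);
     weights(0, 0) = 1.0; weights(0, 1) = -1.0; weights(0, 2) = 0.5;
     weights(1, 0) = 0.5; weights(1, 1) =  1.0; weights(1, 2) = 1.0;
     const RandomMixture mixture(atoms, weights);  // Z takes values in R^2
     Sample grid;                                  // filled with the actual nodes
     const Sample pdf(mixture.computePDF(Point(2, -4.0), Point(2, 4.0), Indices(2, 64), grid));
     return 0;
   }
*/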

struct AddPDFOn1DGridPolicy
{
  const RandomMixture & mixture_;
  const Point & xPoints_;
  Collection<Complex> & output_;

  AddPDFOn1DGridPolicy(const RandomMixture & mixture,
                       const Point & xPoints,
                       Collection<Complex> & output)
    : mixture_(mixture)
    , xPoints_(xPoints)
    , output_(output)
  {}

  inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
  {
    Point x(1);
    for (UnsignedInteger i = r.begin(); i != r.end(); ++i)
    {
      x[0] = xPoints_[i];
      output_[i] = mixture_.computeDeltaCharacteristicFunction(x);
    }
  }
}; /* end struct AddPDFOn1DGridPolicy */

void RandomMixture::addPDFOn1DGrid(const Indices & pointNumber, const Point & h, const Point & tau, Sample & result) const
{
  if (pointNumber.getSize() != 1) throw InvalidArgumentException(HERE) << "Error: the given indices must have dimension=1, here dimension=" << pointNumber.getSize();

  const UnsignedInteger N = pointNumber[0];
  Collection<Complex> fx(N);
  Collection<Complex> z_exp(N);
  const Complex cOne(0.0, 1.0);
  // Grid points
  Point xPlus(N);
  for (UnsignedInteger i = 0; i < N; ++i)
  {
    xPlus[i] = (i + 1) * h[0];
    fx[i] = std::exp(- M_PI * cOne * (tau[0] - 1.0 + 1.0 / N) * (1.0 + i));
    z_exp[i] = std::exp(- 2.0 * M_PI * cOne * static_cast<Scalar>(i) / static_cast<Scalar>(N));
  }

  // FFT 1D
  Collection<Complex> yk(N);
  // 1) compute \Sigma_+
  const AddPDFOn1DGridPolicy policyGridPP(*this, xPlus, yk);
  TBB::ParallelFor( 0, N, policyGridPP);
  for (UnsignedInteger j = 0; j < N; ++j)
    yk[j] *= fx[j];

  Collection<Complex> sigma_plus(fftAlgorithm_.transform(yk));

  for (UnsignedInteger j = 0; j < N; ++j)
    sigma_plus[j] *= z_exp[j];

  // 2) compute \Sigma_-
  Collection<Complex> ykc(N);
  for (UnsignedInteger j = 0; j < N; ++j)
    ykc[j] = std::conj(yk[N - 1 - j]);

  Collection<Complex> sigma_minus(fftAlgorithm_.transform(ykc));

  const Scalar scaling = h[0] / (2.0 * M_PI);
  for (UnsignedInteger j = 0; j < N; ++j)
  {
    result(j, 0) += scaling * std::real( sigma_plus[j] + sigma_minus[j] );
  }
}
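
/* Note on the FFT evaluation above: on the grid nodes
   x_m = mu + ((2m+1)/N - 1) b*sigma, m = 0..N-1, built by the caller, and the
   frequencies t_j = j*h, j = 1..N, the positive-frequency part of the Poisson
   sum, h/(2\pi) \sum_{j=1}^{N} \delta(t_j) \exp(-i t_j x_m), reduces to a
   single length-N FFT once the modulation factors fx and z_exp are factored
   out; the negative-frequency part \Sigma_- is obtained from the conjugated,
   reversed coefficients ykc. One FFT pair thus replaces an O(N^2) direct
   summation by O(N log N) work. */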

struct AddPDFOn2DGridPolicy
{
  const RandomMixture & mixture_;
  const Point & xPoints_;
  const Point & yPoints_;
  const UnsignedInteger nx_;
  const UnsignedInteger ny_;
  Collection<Complex> & output_;

  AddPDFOn2DGridPolicy(const RandomMixture & mixture,
                       const Point & xPoints,
                       const Point & yPoints,
                       Collection<Complex> & output)
    : mixture_(mixture)
    , xPoints_(xPoints)
    , yPoints_(yPoints)
    , nx_(xPoints.getDimension())
    , ny_(yPoints.getDimension())
    , output_(output)
  {}

  inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
  {
    Point x(2);
    for (UnsignedInteger i = r.begin(); i != r.end(); ++i)
    {
      const UnsignedInteger jj = i / nx_;
      const UnsignedInteger ii = i - jj * nx_;
      x[0] = xPoints_[ii];
      x[1] = yPoints_[jj];
      output_[i] = mixture_.computeDeltaCharacteristicFunction(x);
    }
  }
}; /* end struct AddPDFOn2DGridPolicy */

void RandomMixture::addPDFOn2DGrid(const Indices & pointNumber, const Point & h, const Point & tau, Sample & result) const
{
  if (pointNumber.getSize() != 2) throw InvalidArgumentException(HERE) << "Error: the given indices must have dimension=2, here dimension=" << pointNumber.getSize();

  const UnsignedInteger Nx = pointNumber[0];
  const UnsignedInteger Ny = pointNumber[1];
  Collection<Complex> fx(Nx);
  Collection<Complex> fy(Ny);
  Collection<Complex> z_exp_mx(Nx);
  Collection<Complex> z_exp_my(Ny);
  const Complex cOne(0.0, 1.0);
  for (UnsignedInteger i = 0; i < Nx; ++i)
  {
    fx[i] = std::exp(- M_PI * cOne * (tau[0] - 1.0 + 1.0 / Nx) * (1.0 + i));
    z_exp_mx[i] = std::exp(- 2.0 * M_PI * cOne * static_cast<Scalar>(i) / static_cast<Scalar>(Nx));
  }
  for (UnsignedInteger j = 0; j < Ny; ++j)
  {
    fy[j] = std::exp(- M_PI * cOne * (tau[1] - 1.0 + 1.0 / Ny) * (1.0 + j));
    z_exp_my[j] = std::exp(- 2.0 * M_PI * cOne * static_cast<Scalar>(j) / static_cast<Scalar>(Ny));
  }
  Point xPlus(Nx);
  Point xMinus(Nx);
  Point yPlus(Ny);
  Point yMinus(Ny);
  for (UnsignedInteger i = 0; i < Nx; ++i)
  {
    xPlus[i] = (i + 1) * h[0];
    xMinus[i] = (static_cast<Scalar>(i) - Nx) * h[0];
  }
  for (UnsignedInteger j = 0; j < Ny; ++j)
  {
    yPlus[j] = (j + 1) * h[1];
    yMinus[j] = (static_cast<Scalar>(j) - Ny) * h[1];
  }
  ComplexMatrix yk(Nx, Ny);
  // 1) compute \Sigma_++
  const AddPDFOn2DGridPolicy policyGridPP(*this, xPlus, yPlus, *(yk.getImplementation().get()));
  TBB::ParallelFor( 0, Nx * Ny, policyGridPP);
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      yk(i, j) *= fx[i] * fy[j];

  ComplexMatrix sigma_plus_plus(fftAlgorithm_.transform2D(yk));
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      sigma_plus_plus(i, j) *= z_exp_mx[i] * z_exp_my[j];

  // 2) compute \Sigma_--
  ComplexMatrix ykc(Nx, Ny);
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      ykc(i, j) = std::conj(yk(Nx - 1 - i, Ny - 1 - j));
  ComplexMatrix sigma_minus_minus(fftAlgorithm_.transform2D(ykc));

  // 3) compute \Sigma_+-
  const AddPDFOn2DGridPolicy policyGridPM(*this, xPlus, yMinus, *(yk.getImplementation().get()));
  TBB::ParallelFor( 0, Nx * Ny, policyGridPM);
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      yk(i, j) *= fx[i] * std::conj(fy[Ny - 1 - j]);

  ComplexMatrix sigma_plus_minus(fftAlgorithm_.transform2D(yk));
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      sigma_plus_minus(i, j) *= z_exp_mx[i];

  // 4) compute \Sigma_-+
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      ykc(i, j) = std::conj(yk(Nx - 1 - i, Ny - 1 - j));

  ComplexMatrix sigma_minus_plus(fftAlgorithm_.transform2D(ykc));
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      sigma_minus_plus(i, j) *= z_exp_my[j];

  // 5) compute \Sigma_+0
  ComplexCollection yk0(Nx);
  Point x(2);
  x[1] = 0.0;
  for (UnsignedInteger i = 0; i < Nx; ++i)
  {
    x[0] = (i + 1) * h[0];
    yk0[i] = computeDeltaCharacteristicFunction(x) * fx[i];
  }
  ComplexCollection sigma_plus_0(fftAlgorithm_.transform(yk0));
  for (UnsignedInteger i = 0; i < Nx; ++i)
    sigma_plus_0[i] *= z_exp_mx[i];

  // 6) compute \Sigma_-0
  ComplexCollection yk0c(Nx);
  for (UnsignedInteger i = 0; i < Nx; ++i)
    yk0c[i] = std::conj(yk0[Nx - 1 - i]);
  ComplexCollection sigma_minus_0(fftAlgorithm_.transform(yk0c));

  // 7) compute \Sigma_0+
  if (Nx != Ny)
  {
    yk0.resize(Ny);
    yk0c.resize(Ny);
  }
  x[0] = 0.0;
  for (UnsignedInteger j = 0; j < Ny; ++j)
  {
    x[1] = (j + 1) * h[1];
    yk0[j] = computeDeltaCharacteristicFunction(x) * fy[j];
  }
  ComplexCollection sigma_0_plus(fftAlgorithm_.transform(yk0));
  for (UnsignedInteger j = 0; j < Ny; ++j)
    sigma_0_plus[j] *= z_exp_my[j];

  // 8) compute \Sigma_0-
  for (UnsignedInteger j = 0; j < Ny; ++j)
    yk0c[j] = std::conj(yk0[Ny - 1 - j]);
  ComplexCollection sigma_0_minus(fftAlgorithm_.transform(yk0c));

  UnsignedInteger counter = 0;
  const Scalar scaling = (h[0] * h[1]) / (4.0 * M_PI * M_PI);
  for (UnsignedInteger j = 0; j < Ny; ++j)
  {
    for (UnsignedInteger i = 0; i < Nx; ++i, ++counter)
    {
      result(counter, 0) += scaling * std::real(
                              sigma_plus_plus(i, j)   + sigma_minus_minus(i, j) +
                              sigma_plus_minus(i, j)  + sigma_minus_plus(i, j)  +
                              sigma_plus_0[i]  + sigma_minus_0[i] +
                              sigma_0_plus[j]  + sigma_0_minus[j]
                            );
    }
  }
}

struct AddPDFOn3DGridPolicy
{
  const RandomMixture & mixture_;
  const Point & xPoints_;
  const Point & yPoints_;
  const Point & zPoints_;
  const UnsignedInteger nx_;
  const UnsignedInteger ny_;
  const UnsignedInteger nz_;
  Collection<Complex> & output_;

  AddPDFOn3DGridPolicy(const RandomMixture & mixture,
                       const Point & xPoints,
                       const Point & yPoints,
                       const Point & zPoints,
                       Collection<Complex> & output)
    : mixture_(mixture)
    , xPoints_(xPoints)
    , yPoints_(yPoints)
    , zPoints_(zPoints)
    , nx_(xPoints.getDimension())
    , ny_(yPoints.getDimension())
    , nz_(zPoints.getDimension())
    , output_(output)
  {}

  inline void operator()( const TBB::BlockedRange<UnsignedInteger> & r ) const
  {
    Point x(3);
    for (UnsignedInteger i = r.begin(); i != r.end(); ++i)
    {
      const UnsignedInteger kk = i / nx_ / ny_;
      const UnsignedInteger jj = ( i - kk * nx_ * ny_ ) / nx_;
      const UnsignedInteger ii = i - kk * nx_ * ny_ - jj * nx_;
      x[0] = xPoints_[ii];
      x[1] = yPoints_[jj];
      x[2] = zPoints_[kk];
      output_[i] = mixture_.computeDeltaCharacteristicFunction(x);
    }
  }
}; /* end struct AddPDFOn3DGridPolicy */

void RandomMixture::addPDFOn3DGrid(const Indices & pointNumber, const Point & h, const Point & tau, Sample & result) const
{
  if (pointNumber.getSize() != 3) throw InvalidArgumentException(HERE) << "Error: the given indices must have dimension=3, here dimension=" << pointNumber.getSize();

  const UnsignedInteger Nx = pointNumber[0];
  const UnsignedInteger Ny = pointNumber[1];
  const UnsignedInteger Nz = pointNumber[2];
  Collection<Complex> fx(Nx);
  Collection<Complex> fy(Ny);
  Collection<Complex> fz(Nz);
  Collection<Complex> z_exp_mx(Nx);
  Collection<Complex> z_exp_my(Ny);
  Collection<Complex> z_exp_mz(Nz);
  const Complex cOne(0.0, 1.0);
  for (UnsignedInteger i = 0; i < Nx; ++i)
  {
    fx[i] = std::exp(- M_PI * cOne * (tau[0] - 1.0 + 1.0 / Nx) * (1.0 + i));
    z_exp_mx[i] = std::exp(- 2.0 * M_PI * cOne * static_cast<Scalar>(i) / static_cast<Scalar>(Nx));
  }
  for (UnsignedInteger j = 0; j < Ny; ++j)
  {
    fy[j] = std::exp(- M_PI * cOne * (tau[1] - 1.0 + 1.0 / Ny) * (1.0 + j));
    z_exp_my[j] = std::exp(- 2.0 * M_PI * cOne * static_cast<Scalar>(j) / static_cast<Scalar>(Ny));
  }
  for (UnsignedInteger k = 0; k < Nz; ++k)
  {
    fz[k] = std::exp(- M_PI * cOne * (tau[2] - 1.0 + 1.0 / Nz) * (1.0 + k));
    z_exp_mz[k] = std::exp(- 2.0 * M_PI * cOne * static_cast<Scalar>(k) / static_cast<Scalar>(Nz));
  }
  Point xPlus(Nx);
  Point xMinus(Nx);
  Point yPlus(Ny);
  Point yMinus(Ny);
  Point zPlus(Nz);
  Point zMinus(Nz);
  for (UnsignedInteger i = 0; i < Nx; ++i)
  {
    xPlus[i] = (i + 1) * h[0];
    xMinus[i] = (static_cast<Scalar>(i) - Nx) * h[0];
  }
  for (UnsignedInteger j = 0; j < Ny; ++j)
  {
    yPlus[j] = (j + 1) * h[1];
    yMinus[j] = (static_cast<Scalar>(j) - Ny) * h[1];
  }
  for (UnsignedInteger k = 0; k < Nz; ++k)
  {
    zPlus[k] = (k + 1) * h[2];
    zMinus[k] = (static_cast<Scalar>(k) - Nz) * h[2];
  }
  ComplexTensor yk(Nx, Ny, Nz);
  const AddPDFOn3DGridPolicy policyGridPPP(*this, xPlus, yPlus, zPlus, *(yk.getImplementation().get()));
  TBB::ParallelFor( 0, Nx * Ny * Nz, policyGridPPP);
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        yk(i, j, k) *= fx[i] * fy[j] * fz[k];

  // 1) compute \Sigma_+++
  ComplexTensor sigma_plus_plus_plus(fftAlgorithm_.transform3D(yk));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        sigma_plus_plus_plus(i, j, k) *= z_exp_mx[i] * z_exp_my[j] * z_exp_mz[k];
  // 2) compute \Sigma_---
  ComplexTensor ykc(Nx, Ny, Nz);
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        ykc(i, j, k) = std::conj(yk(Nx - 1 - i, Ny - 1 - j, Nz - 1 - k));
  ComplexTensor sigma_minus_minus_minus(fftAlgorithm_.transform3D(ykc));

  // 3) compute \Sigma_++-
  const AddPDFOn3DGridPolicy policyGridPPM(*this, xPlus, yPlus, zMinus, *(yk.getImplementation().get()));
  TBB::ParallelFor( 0, Nx * Ny * Nz, policyGridPPM);
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        yk(i, j, k) *= fx[i] * fy[j] * std::conj(fz[Nz - 1 - k]);

  ComplexTensor sigma_plus_plus_minus(fftAlgorithm_.transform3D(yk));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        sigma_plus_plus_minus(i, j, k) *= z_exp_mx[i] * z_exp_my[j];

  // 4) compute \Sigma_--+
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        ykc(i, j, k) = std::conj(yk(Nx - 1 - i, Ny - 1 - j, Nz - 1 - k));

  ComplexTensor sigma_minus_minus_plus(fftAlgorithm_.transform3D(ykc));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        sigma_minus_minus_plus(i, j, k) *= z_exp_mz[k];

  // 5) compute \Sigma_+-+
  const AddPDFOn3DGridPolicy policyGridPMP(*this, xPlus, yMinus, zPlus, *(yk.getImplementation().get()));
  TBB::ParallelFor( 0, Nx * Ny * Nz, policyGridPMP);
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        yk(i, j, k) *= fx[i] * std::conj(fy[Ny - 1 - j]) * fz[k];

  ComplexTensor sigma_plus_minus_plus(fftAlgorithm_.transform3D(yk));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        sigma_plus_minus_plus(i, j, k) *= z_exp_mx[i] * z_exp_mz[k];

  // 6) compute \Sigma_-+-
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        ykc(i, j, k) = std::conj(yk(Nx - 1 - i, Ny - 1 - j, Nz - 1 - k));

  ComplexTensor sigma_minus_plus_minus(fftAlgorithm_.transform3D(ykc));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        sigma_minus_plus_minus(i, j, k) *= z_exp_my[j];

  // 7) compute \Sigma_+--
  const AddPDFOn3DGridPolicy policyGridPMM(*this, xPlus, yMinus, zMinus, *(yk.getImplementation().get()));
  TBB::ParallelFor( 0, Nx * Ny * Nz, policyGridPMM);
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        yk(i, j, k) *= fx[i] * std::conj(fy[Ny - 1 - j]) * std::conj(fz[Nz - 1 - k]);

  ComplexTensor sigma_plus_minus_minus(fftAlgorithm_.transform3D(yk));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        sigma_plus_minus_minus(i, j, k) *= z_exp_mx[i];

  // 8) compute \Sigma_-++
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        ykc(i, j, k) = std::conj(yk(Nx - 1 - i, Ny - 1 - j, Nz - 1 - k));

  ComplexTensor sigma_minus_plus_plus(fftAlgorithm_.transform3D(ykc));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      for (UnsignedInteger i = 0; i < Nx; ++i)
        sigma_minus_plus_plus(i, j, k) *= z_exp_my[j] * z_exp_mz[k];

  // 9) compute \Sigma_++0
  ComplexMatrix yk0(Nx, Ny);
  Point x(3);
  x[2] = 0.0;
  for (UnsignedInteger j = 0; j < Ny; ++j)
  {
    x[1] = (j + 1) * h[1];
    for (UnsignedInteger i = 0; i < Nx; ++i)
    {
      x[0] = (i + 1) * h[0];
      yk0(i, j) = computeDeltaCharacteristicFunction(x) * fx[i] * fy[j];
    }
  }
  ComplexMatrix sigma_plus_plus_0(fftAlgorithm_.transform2D(yk0));
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      sigma_plus_plus_0(i, j) *= z_exp_mx[i] * z_exp_my[j];

  // 10) compute \Sigma_--0
  ComplexMatrix yk0c(Nx, Ny);
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      yk0c(i, j) = std::conj(yk0(Nx - 1 - i, Ny - 1 - j));
  ComplexMatrix sigma_minus_minus_0(fftAlgorithm_.transform2D(yk0c));

  // 11) compute \Sigma_0++
  if (Nx != Ny || Ny != Nz)
  {
    yk0 = ComplexMatrix(Ny, Nz);
    yk0c = ComplexMatrix(Ny, Nz);
  }
  x[0] = 0.0;
  for (UnsignedInteger k = 0; k < Nz; ++k)
  {
    x[2] = (k + 1) * h[2];
    for (UnsignedInteger j = 0; j < Ny; ++j)
    {
      x[1] = (j + 1) * h[1];
      yk0(j, k) = computeDeltaCharacteristicFunction(x) * fy[j] * fz[k];
    }
  }
  ComplexMatrix sigma_0_plus_plus(fftAlgorithm_.transform2D(yk0));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      sigma_0_plus_plus(j, k) *= z_exp_my[j] * z_exp_mz[k];

  // 12) compute \Sigma_0--
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      yk0c(j, k) = std::conj(yk0(Ny - 1 - j, Nz - 1 - k));
  ComplexMatrix sigma_0_minus_minus(fftAlgorithm_.transform2D(yk0c));

  // 13) compute \Sigma_+0+
  if (Nx != Ny)
  {
    yk0 = ComplexMatrix(Nx, Nz);
    yk0c = ComplexMatrix(Nx, Nz);
  }
  x[1] = 0.0;
  for (UnsignedInteger k = 0; k < Nz; ++k)
  {
    x[2] = (k + 1) * h[2];
    for (UnsignedInteger i = 0; i < Nx; ++i)
    {
      x[0] = (i + 1) * h[0];
      yk0(i, k) = computeDeltaCharacteristicFunction(x) * fx[i] * fz[k];
    }
  }
  ComplexMatrix sigma_plus_0_plus(fftAlgorithm_.transform2D(yk0));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      sigma_plus_0_plus(i, k) *= z_exp_mx[i] * z_exp_mz[k];

  // 14) compute \Sigma_-0-
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      yk0c(i, k) = std::conj(yk0(Nx - 1 - i, Nz - 1 - k));
  ComplexMatrix sigma_minus_0_minus(fftAlgorithm_.transform2D(yk0c));

  // 15) compute \Sigma_+-0
  if (Ny != Nz)
  {
    yk0 = ComplexMatrix(Nx, Ny);
    yk0c = ComplexMatrix(Nx, Ny);
  }
  x[2] = 0.0;
  for (UnsignedInteger j = 0; j < Ny; ++j)
  {
    x[1] = (static_cast<Scalar>(j) - Ny) * h[1];
    for (UnsignedInteger i = 0; i < Nx; ++i)
    {
      x[0] = (i + 1) * h[0];
      yk0(i, j) = computeDeltaCharacteristicFunction(x) * fx[i] * std::conj(fy[Ny - 1 - j]);
    }
  }
  ComplexMatrix sigma_plus_minus_0(fftAlgorithm_.transform2D(yk0));
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      sigma_plus_minus_0(i, j) *= z_exp_mx[i];

  // 16) compute \Sigma_-+0
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      yk0c(i, j) = std::conj(yk0(Nx - 1 - i, Ny - 1 - j));
  ComplexMatrix sigma_minus_plus_0(fftAlgorithm_.transform2D(yk0c));
  for (UnsignedInteger j = 0; j < Ny; ++j)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      sigma_minus_plus_0(i, j) *= z_exp_my[j];

  // 17) compute \Sigma_+0-
  if (Nz != Ny)
  {
    yk0 = ComplexMatrix(Nx, Nz);
    yk0c = ComplexMatrix(Nx, Nz);
  }
  x[1] = 0.0;
  for (UnsignedInteger k = 0; k < Nz; ++k)
  {
    x[2] = (static_cast<Scalar>(k) - Nz) * h[2];
    for (UnsignedInteger i = 0; i < Nx; ++i)
    {
      x[0] = (i + 1) * h[0];
      yk0(i, k) = computeDeltaCharacteristicFunction(x) * fx[i] * std::conj(fz[Nz - 1 - k]);
    }
  }
  ComplexMatrix sigma_plus_0_minus(fftAlgorithm_.transform2D(yk0));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      sigma_plus_0_minus(i, k) *= z_exp_mx[i];

  // 18) compute \Sigma_-0+
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      yk0c(i, k) = std::conj(yk0(Nx - 1 - i, Nz - 1 - k));
  ComplexMatrix sigma_minus_0_plus(fftAlgorithm_.transform2D(yk0c));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger i = 0; i < Nx; ++i)
      sigma_minus_0_plus(i, k) *= z_exp_mz[k];

  // 19) compute \Sigma_0+-
  if (Nx != Ny)
  {
    yk0 = ComplexMatrix(Ny, Nz);
    yk0c = ComplexMatrix(Ny, Nz);
  }
  x[0] = 0.0;
  for (UnsignedInteger k = 0; k < Nz; ++k)
  {
    x[2] = (static_cast<Scalar>(k) - Nz) * h[2];
    for (UnsignedInteger j = 0; j < Ny; ++j)
    {
      x[1] = (j + 1) * h[1];
      yk0(j, k) = computeDeltaCharacteristicFunction(x) * fy[j] * std::conj(fz[Nz - 1 - k]);
    }
  }
  ComplexMatrix sigma_0_plus_minus(fftAlgorithm_.transform2D(yk0));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      sigma_0_plus_minus(j, k) *= z_exp_my[j];

  // 20) compute \Sigma_0-+
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      yk0c(j, k) = std::conj(yk0(Ny - 1 - j, Nz - 1 - k));
  ComplexMatrix sigma_0_minus_plus(fftAlgorithm_.transform2D(yk0c));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    for (UnsignedInteger j = 0; j < Ny; ++j)
      sigma_0_minus_plus(j, k) *= z_exp_mz[k];

  // 21) compute \Sigma_+00
  Collection<Complex> yk00(Nx);
  x[1] = 0.0;
  x[2] = 0.0;
  for (UnsignedInteger i = 0; i < Nx; ++i)
  {
    x[0] = (i + 1) * h[0];
    yk00[i] = computeDeltaCharacteristicFunction(x) * fx[i];
  }
  Collection<Complex> sigma_plus_0_0(fftAlgorithm_.transform(yk00));
  for (UnsignedInteger i = 0; i < Nx; ++i)
    sigma_plus_0_0[i] *= z_exp_mx[i];

  // 22) compute \Sigma_-00
  Collection<Complex> yk00c(Nx);
  for (UnsignedInteger i = 0; i < Nx; ++i)
    yk00c[i] = std::conj(yk00[Nx - 1 - i]);
  Collection<Complex> sigma_minus_0_0(fftAlgorithm_.transform(yk00c));

  // 23) compute \Sigma_0+0
  if (Nx != Ny)
  {
    yk00.resize(Ny);
    yk00c.resize(Ny);
  }
  x[0] = 0.0;
  x[2] = 0.0;
  for (UnsignedInteger j = 0; j < Ny; ++j)
  {
    x[1] = (j + 1) * h[1];
    yk00[j] = computeDeltaCharacteristicFunction(x) * fy[j];
  }
  Collection<Complex> sigma_0_plus_0(fftAlgorithm_.transform(yk00));
  for (UnsignedInteger j = 0; j < Ny; ++j)
    sigma_0_plus_0[j] *= z_exp_my[j];

  // 24) compute \Sigma_0-0
  for (UnsignedInteger j = 0; j < Ny; ++j)
    yk00c[j] = std::conj(yk00[Ny - 1 - j]);
  Collection<Complex> sigma_0_minus_0(fftAlgorithm_.transform(yk00c));

  // 25) compute \Sigma_00+
  if (Ny != Nz)
  {
    yk00.resize(Nz);
    yk00c.resize(Nz);
  }
  x[0] = 0.0;
  x[1] = 0.0;
  for (UnsignedInteger k = 0; k < Nz; ++k)
  {
    x[2] = (k + 1) * h[2];
    yk00[k] = computeDeltaCharacteristicFunction(x) * fz[k];
  }
  Collection<Complex> sigma_0_0_plus(fftAlgorithm_.transform(yk00));
  for (UnsignedInteger k = 0; k < Nz; ++k)
    sigma_0_0_plus[k] *= z_exp_mz[k];

  // 26) compute \Sigma_00-
  for (UnsignedInteger k = 0; k < Nz; ++k)
    yk00c[k] = std::conj(yk00[Nz - 1 - k]);
  Collection<Complex> sigma_0_0_minus(fftAlgorithm_.transform(yk00c));

  UnsignedInteger counter = 0;
  const Scalar scaling = (h[0] * h[1] * h[2]) / (8.0 * M_PI * M_PI * M_PI);
  for (UnsignedInteger k = 0; k < Nz; ++k)
  {
    for (UnsignedInteger j = 0; j < Ny; ++j)
    {
      for (UnsignedInteger i = 0; i < Nx; ++i, ++counter)
      {
        result(counter, 0) += scaling * std::real(
                                sigma_plus_plus_plus(i, j, k)   + sigma_minus_minus_minus(i, j, k) +
                                sigma_plus_plus_minus(i, j, k)  + sigma_minus_minus_plus(i, j, k)  +
                                sigma_plus_minus_plus(i, j, k)  + sigma_minus_plus_minus(i, j, k)  +
                                sigma_plus_minus_minus(i, j, k) + sigma_minus_plus_plus(i, j, k)   +
                                sigma_plus_plus_0(i, j)  + sigma_minus_minus_0(i, j) +
                                sigma_plus_minus_0(i, j) + sigma_minus_plus_0(i, j)  +
                                sigma_plus_0_plus(i, k)  + sigma_minus_0_minus(i, k) +
                                sigma_plus_0_minus(i, k) + sigma_minus_0_plus(i, k)  +
                                sigma_0_plus_plus(j, k)  + sigma_0_minus_minus(j, k) +
                                sigma_0_plus_minus(j, k) + sigma_0_minus_plus(j, k)  +
                                sigma_0_0_plus[k] + sigma_0_0_minus[k] +
                                sigma_0_plus_0[j] + sigma_0_minus_0[j] +
                                sigma_plus_0_0[i] + sigma_minus_0_0[i]
                              );
      }
    }
  }
}

/* Get the CDF of the RandomMixture */
Scalar RandomMixture::computeCDF(const Point & point) const
{
  if (point.getDimension() != getDimension())
    throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << getDimension() << ", here dimension=" << point.getDimension();

  if (point.getDimension() != 1) return computeProbability(Interval(getRange().getLowerBound(), point));

  const Scalar x = point[0];
  // Special case for a combination containing only one contributor Y = alpha * X + beta
  // for alpha > 0.0:
  // P(Y < y) = P(X < (y - beta) / alpha) = CDF_X((y - beta) / alpha)
  // for alpha < 0.0:
  // P(Y < y) = P(X > (y - beta) / alpha) = 1.0 - CDF_X((y - beta) / alpha)
  if (isAnalytical_ && (dimension_ == 1))
  {
    const Scalar alpha = weights_(0, 0);
    const Scalar cdf = alpha > 0.0 ? distributionCollection_[0].computeCDF((x - constant_[0]) / alpha) : distributionCollection_[0].computeComplementaryCDF((x - constant_[0]) / alpha);
    return cdf;
  }
  // Check the range
  const Interval range(getRange());
  const Scalar lowerBound = range.getLowerBound()[0];
  const Scalar upperBound = range.getUpperBound()[0];
  if (x <= lowerBound) return 0.0;
  if (x >= upperBound) return 1.0;
  if (!isContinuous() && distributionCollection_.getSize() > 1) throw NotYetImplementedException(HERE) << "Error: no algorithm is currently available for the non-continuous case with more than one atom.";
  // Special case for 1D distributions with exactly 2 continuous atoms
  if ((dimension_ == 1) && (distributionCollection_.getSize() == 2) && distributionCollection_[0].isContinuous() && distributionCollection_[1].isContinuous())
  {
    // Z = alpha0 + alpha1 X1 + alpha2 X2
    // F(z) = P(Z < z) = P(alpha1 X1 + alpha2 X2 < z0) with z0 = z - alpha0
    // If alpha2 > 0:
    // F(z) = \int_R F_X2((z0 - alpha1 x1) / alpha2)p_X1(x1)dx1
    // Else:
    // F(z) = \int_R Fbar_X2((z0 - alpha1 x1) / alpha2)p_X1(x1)dx1
    // Get the parameters of the random mixture
    const Scalar z0 = x - constant_[0];
    const Scalar alpha1 = weights_(0, 0);
    const Scalar alpha2 = weights_(0, 1);
    // Get the bounds of the atoms
    const Scalar a = distributionCollection_[0].getRange().getLowerBound()[0];
    const Scalar b = distributionCollection_[0].getRange().getUpperBound()[0];
    const Scalar c = distributionCollection_[1].getRange().getLowerBound()[0];
    const Scalar d = distributionCollection_[1].getRange().getUpperBound()[0];
    // If alpha2 > 0
    if (alpha2 > 0.0)
    {
      // F(z) = \int_R F_X2((z0 - alpha1 x1) / alpha2)p_X1(x1)dx1
      const CDFKernelRandomMixture convolutionKernel(alpha1, alpha2, distributionCollection_[0].getImplementation(), distributionCollection_[1].getImplementation(), z0);
      // Bounds:
      // x1 >= a otherwise p_X1 == 0
      // x1 <= b otherwise p_X1 == 0
      // (z0 - alpha1 x1) / alpha2 >= c otherwise F_X2 == 0
      // (z0 - alpha1 x1) / alpha2 <= d otherwise F_X2 == 1
      if (alpha1 > 0.0)
      {
        // (z0 - alpha1 x1) / alpha2 >= c <=> x1 <= (z0 - alpha2 * c) / alpha1 = beta
        // (z0 - alpha1 x1) / alpha2 <= d <=> x1 >= (z0 - alpha2 * d) / alpha1 = alpha
        const Scalar alpha = (z0 - alpha2 * d) / alpha1;
        const Scalar beta  = (z0 - alpha2 * c) / alpha1;
        const Scalar lower = std::max(a, alpha);
        const Scalar upper = std::min(b, beta);
        Scalar cdf = 0.0;
        if (lower < upper) cdf = algo_.integrate(convolutionKernel, lower, upper);
        // Take into account a possible missing tail:
        // \int_a^alpha F_X2((z0 - alpha1 x1) / alpha2)p_X1(x1)dx1 = F_X1(alpha)
        if (lower > a) cdf += distributionCollection_[0].computeCDF(alpha);
        return cdf;
      }
      // Here alpha1 < 0
      // (z0 - alpha1 x1) / alpha2 >= c <=> x1 >= (z0 - alpha2 * c) / alpha1 = alpha
      // (z0 - alpha1 x1) / alpha2 <= d <=> x1 <= (z0 - alpha2 * d) / alpha1 = beta
      const Scalar alpha = (z0 - alpha2 * c) / alpha1;
      const Scalar beta  = (z0 - alpha2 * d) / alpha1;
      const Scalar lower = std::max(a, alpha);
      const Scalar upper = std::min(b, beta);
      Scalar cdf = 0.0;
      if (lower < upper) cdf = algo_.integrate(convolutionKernel, lower, upper);
      // Take into account a possible missing tail:
      // \int_beta^b F_X2((z0 - alpha1 x1) / alpha2)p_X1(x1)dx1 = Fbar_X1(beta)
      if (upper < b) cdf += distributionCollection_[0].computeComplementaryCDF(beta);
      return cdf;
    } // alpha2 > 0
    // F(z) = \int_R Fbar_X2((z0 - alpha1 x1) / alpha2)p_X1(x1)dx1
    const ComplementaryCDFKernelRandomMixture convolutionKernel(alpha1, alpha2, distributionCollection_[0].getImplementation(), distributionCollection_[1].getImplementation(), z0);
    // Bounds:
    // x1 >= a otherwise p_X1 == 0
    // x1 <= b otherwise p_X1 == 0
    // (z0 - alpha1 x1) / alpha2 >= c otherwise Fbar_X2 == 1
    // (z0 - alpha1 x1) / alpha2 <= d otherwise Fbar_X2 == 0
    if (alpha1 > 0.0)
    {
      // (z0 - alpha1 x1) / alpha2 >= c <=> x1 >= (z0 - alpha2 * c) / alpha1 = alpha
      // (z0 - alpha1 x1) / alpha2 <= d <=> x1 <= (z0 - alpha2 * d) / alpha1 = beta
      const Scalar alpha = (z0 - alpha2 * c) / alpha1;
      const Scalar beta  = (z0 - alpha2 * d) / alpha1;
      const Scalar lower = std::max(a, alpha);
      const Scalar upper = std::min(b, beta);
      Scalar cdf = 0.0;
      if (lower < upper) cdf = algo_.integrate(convolutionKernel, lower, upper);
      // Take into account a possible missing tail:
      // \int_a^alpha Fbar_X2((z0 - alpha1 x1) / alpha2)p_X1(x1)dx1 = F_X1(alpha) as Fbar_X2 == 1 there
      if (lower > a) cdf += distributionCollection_[0].computeCDF(alpha);
      return cdf;
    }
    // Here alpha1 < 0
    // (z0 - alpha1 x1) / alpha2 >= c <=> x1 <= (z0 - alpha2 * c) / alpha1 = beta
    // (z0 - alpha1 x1) / alpha2 <= d <=> x1 >= (z0 - alpha2 * d) / alpha1 = alpha
    const Scalar alpha = (z0 - alpha2 * d) / alpha1;
    const Scalar beta  = (z0 - alpha2 * c) / alpha1;
    const Scalar lower = std::max(a, alpha);
    const Scalar upper = std::min(b, beta);
    Scalar cdf = 0.0;
    if (lower < upper) cdf = algo_.integrate(convolutionKernel, lower, upper);
    // Take into account a possible missing tail:
    // \int_beta^b Fbar_X2((z0 - alpha1 x1) / alpha2)p_X1(x1)dx1 = Fbar_X1(beta) as Fbar_X2 == 1 there
    if (upper < b) cdf += distributionCollection_[0].computeComplementaryCDF(beta);
    return cdf;
  } // dimension_ == 1 && size == 2

  // Here we call computeProbability with a ]-inf, x] interval
  const Scalar cdf = computeProbability(Interval(Point(1, lowerBound), point, getRange().getFiniteLowerBound(), Interval::BoolCollection(1, true)));
  if (cdf < 0.5) return cdf;
  // If the CDF value is greater than 1/2, it is more accurate to compute it from the complementary probability
  else return 1.0 - computeProbability(Interval(point, Point(1, upperBound), Interval::BoolCollection(1, true), getRange().getFiniteUpperBound()));
}
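
/* Illustrative usage sketch (comment only, not part of the build): for two
   continuous atoms the CDF above reduces to a single 1D integral, and the CDF
   and complementary CDF are consistent by construction. The atoms below are
   assumptions chosen for the example.

   #include <cmath>
   #include "openturns/OT.hxx"
   using namespace OT;

   int main()
   {
     Collection<Distribution> atoms;
     atoms.add(Normal(0.0, 1.0));
     atoms.add(Uniform(0.0, 1.0));
     const RandomMixture mixture(atoms);                 // Z = X1 + X2
     const Scalar cdf = mixture.computeCDF(Point(1, 0.8));
     const Scalar ccdf = mixture.computeComplementaryCDF(Point(1, 0.8));
     return std::abs(cdf + ccdf - 1.0) < 1.0e-6 ? 0 : 1;
   }
*/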

Scalar RandomMixture::computeComplementaryCDF(const Point & point) const
{
  if (point.getDimension() != getDimension())
    throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << getDimension() << ", here dimension=" << point.getDimension();

  if (getDimension() > 1) return DistributionImplementation::computeComplementaryCDF(point);
  const Scalar x = point[0];
  // Special case for a combination containing only one contributor Y = alpha * X + beta
  // for alpha > 0.0:
  // P(Y > y) = P(X > (y - beta) / alpha) = 1.0 - CDF_X((y - beta) / alpha)
  // for alpha < 0.0:
  // P(Y > y) = P(X < (y - beta) / alpha) = CDF_X((y - beta) / alpha)
  if (isAnalytical_)
  {
    const Scalar alpha = weights_(0, 0);
    if (alpha > 0.0) return distributionCollection_[0].computeComplementaryCDF((x - constant_[0]) / alpha);
    // If alpha < 0.0, compute the CDF
    return distributionCollection_[0].computeCDF((x - constant_[0]) / alpha);
  }
  // Check range
  const Interval range(getRange());
  const Scalar lowerBound = range.getLowerBound()[0];
  const Scalar upperBound = range.getUpperBound()[0];
  if (x <= lowerBound) return 1.0;
  if (x >= upperBound) return 0.0;
  // Here we call computeProbability with a [x, +inf[ interval
  const Scalar complementaryCDF = computeProbability(Interval(point, Point(1, upperBound), Interval::BoolCollection(1, true), getRange().getFiniteUpperBound()));
  if (complementaryCDF < 0.5) return complementaryCDF;
  // If the complementary CDF value is greater than 1/2, it is more accurate to use the CDF
  else return 1.0 - computeProbability(Interval(Point(1, lowerBound), point, getRange().getFiniteLowerBound(), Interval::BoolCollection(1, true)));
}

/*  Compute the CDF of 1D distributions over a regular grid. The precision is reduced as this method is for drawing purpose only. */
Sample RandomMixture::computeCDF(const Scalar xMin,
                                 const Scalar xMax,
                                 const UnsignedInteger pointNumber,
                                 Sample & grid) const
{
  return DistributionImplementation::computeCDF(xMin, xMax, pointNumber, grid);
}

/* Get the probability content of an interval. It uses the Poisson inversion formula as described in the reference:
   "Abate, J. and Whitt, W. (1992). The Fourier-series method for inverting
   transforms of probability distributions. Queueing Systems 10, 5--88.",
   formula 5.14.
   We use an incremental update of the trigonometric functions and reduce the complex arithmetic to a real
   arithmetic for performance purpose.
*/
Scalar RandomMixture::computeProbability(const Interval & interval) const
{
  const UnsignedInteger dimension = getDimension();
  if (interval.getDimension() != dimension) throw InvalidArgumentException(HERE) << "Error: the given interval must have dimension=" << dimension << ", here dimension=" << interval.getDimension();

  if (interval.isEmpty()) return 0.0;
  // Use direct convolution for two continuous atoms of dimension 1
  if ((dimension_ == 1) && (distributionCollection_.getSize() == 2) && distributionCollection_[0].isContinuous() && distributionCollection_[1].isContinuous())
  {
    const Scalar lower = interval.getLowerBound()[0];
    const Scalar upper = interval.getUpperBound()[0];
    const Scalar cdfLower = computeCDF(lower);
    const Scalar cdfUpper = computeCDF(upper);
    return std::min(1.0, std::max(0.0, cdfUpper - cdfLower));
  }
  if ((dimension != 1) || (distributionCollection_.getSize() >= ResourceMap::GetAsUnsignedInteger( "RandomMixture-SmallSize" )))
  {
    pdfPrecision_ = std::pow(SpecFunc::ScalarEpsilon, 2.0 / (3.0 * dimension_));
    const UnsignedInteger n1 = ResourceMap::GetAsUnsignedInteger("RandomMixture-MarginalIntegrationNodesNumber");
    const UnsignedInteger N = ResourceMap::GetAsUnsignedInteger("RandomMixture-MaximumIntegrationNodesNumber");
    const UnsignedInteger n2 = static_cast<UnsignedInteger>(round(std::pow(N, 1.0 / dimension_)));
    const UnsignedInteger marginalSize = SpecFunc::NextPowerOfTwo(std::min(n1, n2));
    setIntegrationNodesNumber(marginalSize);
    if (isContinuous()) return computeProbabilityContinuous(interval);
    // Generic implementation for discrete distributions
    if (isDiscrete())   return computeProbabilityDiscrete(interval);
    // Generic implementation for general distributions
    return computeProbabilityGeneral(interval);
  }
  // Special case for combination containing only one contributor
  if (isAnalytical_ && (dimension_ == 1))
  {
    const Scalar lower = interval.getLowerBound()[0];
    const Scalar upper = interval.getUpperBound()[0];
    const Scalar alpha = weights_(0, 0);
    // Negative alpha, swap the upper and lower bounds
    if (alpha < 0.0) return distributionCollection_[0].computeProbability(Interval((upper - constant_[0]) / alpha, (lower - constant_[0]) / alpha));
    return distributionCollection_[0].computeProbability(Interval((lower - constant_[0]) / alpha, (upper - constant_[0]) / alpha));
  } // isAnalytical
  const Interval clippedInterval(getRange().intersect(interval));
  // Quick return if there is no mass in the clipped interval
  if (clippedInterval.isEmpty()) return 0.0;
  const Bool finiteLowerBound = clippedInterval.getFiniteLowerBound()[0] == 1;
  const Bool finiteUpperBound = clippedInterval.getFiniteUpperBound()[0] == 1;
  // Quick return for integral over the whole real line
  if (!finiteLowerBound && !finiteUpperBound) return 1.0;
  const Scalar lowerBound = clippedInterval.getLowerBound()[0];
  const Scalar upperBound = clippedInterval.getUpperBound()[0];
  // Small size case: use Fourier series
  const Scalar precision = cdfPrecision_;
  Scalar error = 2.0 * precision;
  const Scalar a = referenceBandwidth_[0] * lowerBound;
  const Scalar b = referenceBandwidth_[0] * upperBound;
  const Scalar factor = referenceBandwidth_[0] / M_PI;
  Scalar value = computeEquivalentNormalCDFSum(lowerBound, upperBound);
  UnsignedInteger k = 1;
  const UnsignedInteger kmin = 1 << blockMin_;
  const UnsignedInteger kmax = 1 << blockMax_;
  while ( (k < kmax) && (error > std::max(precision, std::abs(precision * value)) || k < kmin) )
  {
    error = 0.0;
    for (UnsignedInteger m = k; m < 2 * k; ++m)
    {
      const Scalar sinMHLower = std::sin(m * a);
      const Scalar cosMHLower = std::cos(m * a);
      const Scalar sinMHUpper = std::sin(m * b);
      const Scalar cosMHUpper = std::cos(m * b);
      const Complex deltaValue(computeDeltaCharacteristicFunction(m));
      const Scalar contribution = factor * (deltaValue.real() * (sinMHUpper - sinMHLower) + deltaValue.imag() * (cosMHLower - cosMHUpper)) / (m * referenceBandwidth_[0]);
      value += contribution;
      error += std::abs(contribution);
    }
    k *= 2;
  }
  // For extreme values of the argument, the computed value can be slightly outside of [0,1]. Truncate it.
  return (value < 0.0 ? 0.0 : (value > 1.0 ? 1.0 : value));
}
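
/* Illustrative sketch (not part of the library): the truncated Poisson/Fourier
   inversion used above, written as a standalone program for Y = U1 + U2 with
   U1, U2 independent Uniform(-1/2, 1/2). The equivalent normal is N(0, 1/6) and,
   the range [-1, 1] being finite, the bandwidth is halved: h = pi / (b - a).
   All names are local to this sketch; compile it separately.

   #include <cmath>
   #include <complex>
   #include <cstdio>

   int main()
   {
     const double sigma2 = 1.0 / 6.0;        // variance of Y
     const double h = M_PI / 2.0;            // halved bandwidth for the finite range [-1, 1]
     const double l = -0.25, u = 0.5;        // interval of interest
     auto normCDF = [&](double x) { return 0.5 * std::erfc(-x / std::sqrt(2.0 * sigma2)); };
     auto cf = [](double t) { return (t == 0.0) ? 1.0 : std::pow(std::sin(t / 2.0) / (t / 2.0), 2); };
     auto cfNormal = [&](double t) { return std::exp(-0.5 * sigma2 * t * t); };
     // Left-hand sum of Poisson's formula for the equivalent normal
     double value = normCDF(u) - normCDF(l);
     for (int i = 1; i <= 10; ++i)
     {
       const double step = 2.0 * M_PI * i / h;
       value += (normCDF(u + step) - normCDF(l + step)) + (normCDF(u - step) - normCDF(l - step));
     }
     // Fourier-series correction driven by delta(t) = cf(t) - cfNormal(t), cf. formula 5.14
     for (int m = 1; m <= 1024; ++m)
     {
       const std::complex<double> delta(cf(m * h) - cfNormal(m * h), 0.0);
       value += (h / M_PI) * (delta.real() * (std::sin(m * h * u) - std::sin(m * h * l))
                              + delta.imag() * (std::cos(m * h * l) - std::cos(m * h * u))) / (m * h);
     }
     std::printf("P(l < Y < u) ~ %.10f (exact: 0.59375)\n", value);
     return 0;
   }
*/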

/*  Compute the quantile over a regular grid */
Sample RandomMixture::computeQuantile(const Scalar qMin,
                                      const Scalar qMax,
                                      const UnsignedInteger pointNumber,
                                      const Bool tail) const
{
  if (getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: cannot compute the quantile over a regular 1D grid if the dimension is > 1";
  Sample result(pointNumber, 2);
  Scalar q = (tail ? 1.0 - qMax : qMin);
  const Scalar step = (qMax - qMin) / Scalar(pointNumber - 1.0);
  for (UnsignedInteger i = 0; i < pointNumber; ++i)
  {
    result(i, 0) = q;
    result(i, 1) = computeQuantile(q)[0];
    q += step;
  }
  return result;
}

/* Quantile computation for dimension=1 */
Scalar RandomMixture::computeScalarQuantile(const Scalar prob,
    const Bool tail) const
{
  if (dimension_ != 1) throw InvalidDimensionException(HERE) << "Error: the method computeScalarQuantile is only defined for 1D distributions";

  // Special case for a random mixture with only 1 atom: Y = alpha * X + beta
  // find Yq such that P(Y < Yq) = q
  // i.e. for alpha > 0
  // P(X < (Yq - beta) / alpha) = q -> Yq = alpha * Xq + beta where P(X < Xq) = q
  // and for alpha < 0
  // P(X > (Yq - beta) / alpha) = q i.e. P(X < (Yq - beta) / alpha) = r with r = 1 - q -> Yq = alpha * Xr + beta
  if (isAnalytical_)
  {
    const Scalar alpha = weights_(0, 0);
    // tail && alpha > 0: 1-p
    // tail && alpha <= 0: p
    // !tail && alpha > 0: p
    // !tail && alpha <= 0: 1-p
    // -> 1-p iff tail != (alpha <= 0) (xor)
    const Scalar q = distributionCollection_[0].computeQuantile(prob, tail != (alpha <= 0.0))[0];
    return q * alpha + constant_[0];
  }
  if (isContinuous())
  {
    // General continuous case
    // Try a Newton method to benefit from the additive nature of Poisson's summation formula:
    // F(x_n+dx_n)~F(x_n)+p(x_n)dx_n
    // so F(x_n+dx_n)=q gives dx_n = (q - F(x_n)) / p(x_n)
    // and at the next step we have to compute F(x_n+dx_n), p(x_n+dx_n)
    // but F(x_n+dx_n)=F(x_n)+P(X\in[x_n,x_n+dx_n])
    const Scalar q = (tail ? 1.0 - prob : prob);
    Scalar x = equivalentNormal_.computeQuantile(q)[0];
    const Scalar sigma = equivalentNormal_.getStandardDeviation()[0];
    const Scalar epsilon = cdfEpsilon_ * sigma;
    Scalar dx = sigma;
    Scalar cdf = computeCDF(x);
    const Bool twoAtoms = distributionCollection_.getSize() == 2;
    for (UnsignedInteger i = 0; i < 16 && std::abs(dx) > epsilon; ++i)
    {
      const Scalar pdf = computePDF(x);
      dx = (q - cdf) / pdf;
      // Depending on the size of the mixture, use computeCDF (size == 2) or computeProbability (size > 2)
      // In both cases the CDF must be evaluated at the updated point x + dx
      if (twoAtoms) cdf = computeCDF(x + dx);
      else
      {
        const Scalar dcdf = (dx > 0.0 ? computeProbability(Interval(x, x + dx)) : computeProbability(Interval(x + dx, x)));
        cdf += (dx > 0.0 ? dcdf : -dcdf);
      }
      x += dx;
    }
    // Has the Newton iteration converged?
    if (std::abs(dx) <= epsilon) return x;
  }
  // If Newton's iteration did not converge, or if the distribution is neither continuous nor analytical
  return DistributionImplementation::computeScalarQuantile(prob, tail);
}
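
/* Illustrative sketch (not part of the library): the Newton refinement used above,
   F(x_n + dx) ~ F(x_n) + p(x_n) dx, demonstrated on a standard normal target whose
   CDF and PDF are available in closed form. Names are local; compile separately.

   #include <cmath>
   #include <cstdio>

   int main()
   {
     const double q = 0.975;   // target probability
     auto cdf = [](double x) { return 0.5 * std::erfc(-x / std::sqrt(2.0)); };
     auto pdf = [](double x) { return std::exp(-0.5 * x * x) / std::sqrt(2.0 * M_PI); };
     double x = 0.0;           // crude start; the method above starts from the equivalent normal quantile
     for (int i = 0; i < 16; ++i)
     {
       const double dx = (q - cdf(x)) / pdf(x);   // Newton step on F(x) = q
       x += dx;
       if (std::abs(dx) < 1.0e-12) break;
     }
     std::printf("quantile(0.975) ~ %.8f (expected about 1.95996398)\n", x);
     return 0;
   }
*/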

/** Get the minimum volume level set containing a given probability of the distribution */
LevelSet RandomMixture::computeMinimumVolumeLevelSetWithThreshold(const Scalar prob,
    Scalar & threshold) const
{
  Function minimumVolumeLevelSetFunction(MinimumVolumeLevelSetEvaluation(clone()).clone());
  minimumVolumeLevelSetFunction.setGradient(MinimumVolumeLevelSetGradient(clone()).clone());
  // As we are in 1D and as the function defining the composite distribution can have complex variations,
  // we use an improved sampling method to compute the quantile of the -logPDF(X) distribution
  const UnsignedInteger size = SpecFunc::NextPowerOfTwo(ResourceMap::GetAsUnsignedInteger("Distribution-MinimumVolumeLevelSetSamplingSize"));
  const Sample minusLogPDFSample(computeLogPDF(getSampleByQMC(size)) * Point(1, -1.0));
  const Scalar minusLogPDFThreshold = minusLogPDFSample.computeQuantile(prob)[0];
  threshold = std::exp(-minusLogPDFThreshold);

  return LevelSet(minimumVolumeLevelSetFunction, LessOrEqual(), minusLogPDFThreshold);
}

/* Get the characteristic function of the distribution, i.e. phi(u) = E(exp(I*u*X)) */
Complex RandomMixture::computeCharacteristicFunction(const Scalar x) const
{
  if (x == 0.0) return 1.0;
  return std::exp(computeLogCharacteristicFunction(x));
}

Complex RandomMixture::computeCharacteristicFunction(const Point & x) const
{
  // The characteristic function is given by the following formula:
  // \phi(y) = \prod_{j=1}^{d} exp(i * y_j * constant_j) * \prod_{k=1}^{n} \phi_{X_k}((M^t y)_k)
  return std::exp(computeLogCharacteristicFunction(x));
}

Complex RandomMixture::computeLogCharacteristicFunction(const Scalar x) const
{
  if (x == 0.0) return 0.0;
  Complex logCfValue(0.0, constant_[0] * x);
  const UnsignedInteger size = distributionCollection_.getSize();
  const Scalar smallScalar = 0.5 * std::log(SpecFunc::MinScalar);
  for(UnsignedInteger i = 0; i < size; ++i)
  {
    logCfValue += distributionCollection_[i].computeLogCharacteristicFunction(weights_(0, i) * x);
    // Early exit for null value
    if (logCfValue.real() < smallScalar) break;
  } /* end for */
  return logCfValue;
}

Complex RandomMixture::computeLogCharacteristicFunction(const Point & x) const
{
  // The log-characteristic function is given by:
  // log(\phi(x)) = \sum_{j=1}^{d} (i * x_j * constant_j) + \sum_{k=1}^{n} log(\phi_{X_k}((M^t x)_k))
  const UnsignedInteger dimension = getDimension();
  const UnsignedInteger size = distributionCollection_.getSize();
  const Scalar smallScalar = 0.5 * std::log(SpecFunc::MinScalar);
  // 1) compute the deterministic term
  Complex logCfValue;
  for (UnsignedInteger i = 0; i < dimension; ++i) logCfValue += Complex(0.0, x[i] * constant_[i]);
  // 2) compute the random part
  // The variables are independent
  for (UnsignedInteger i = 0; i < size; ++i)
  {
    // compute M^t * x
    Scalar wtx = 0.0;
    for (UnsignedInteger j = 0; j < dimension; ++j) wtx += weights_(j, i) * x[j];
    logCfValue += distributionCollection_[i].computeLogCharacteristicFunction(wtx);
    if (logCfValue.real() < smallScalar) break;
  }
  return logCfValue;
}
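
/* Illustrative sketch (not part of the library): the additivity used above. For
   Y = c + \sum_k w_k X_k with independent atoms, log(\phi_Y(t)) = i*t*c +
   \sum_k log(\phi_{X_k}(w_k t)). With two standard normal atoms and weights 2 and 3,
   this yields log(\phi_Y(t)) = -13 t^2 / 2, i.e. Y ~ N(0, 13). Compile separately.

   #include <complex>
   #include <cstdio>

   int main()
   {
     auto logCFNormal = [](double t) { return std::complex<double>(-0.5 * t * t, 0.0); };
     const double t = 0.7, w1 = 2.0, w2 = 3.0;
     const std::complex<double> sum = logCFNormal(w1 * t) + logCFNormal(w2 * t);
     std::printf("sum of atom log-CFs = %.6f, -13 t^2 / 2 = %.6f\n", sum.real(), -6.5 * t * t);
     return 0;
   }
*/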

/* Compute a value of the characteristic function on a prescribed discretization. As the value associated with index == 0 is known, it is not stored, so for index > 0 the corresponding value is at position index - 1 */
Complex RandomMixture::computeDeltaCharacteristicFunction(const UnsignedInteger index) const
{
  if (index == 0) return 0.0;
  // The cached values are computed and stored in an ascending order without hole: this function is always called on a sequence starting from 0 to n-1
  // Usual case first: the index is within the already computed values
  if (index <= storedSize_) return characteristicValuesCache_[index - 1];
  // If the index is higher than the maximum allowed storage
  if (index > maxSize_)
  {
    LOGINFO(OSS() << "Cache exceeded in RandomMixture::computeDeltaCharacteristicFunction, consider increasing maxSize_ to " << index);
    const Scalar x = index * referenceBandwidth_[0];
    const Complex logCF(computeLogCharacteristicFunction(x));
    const Complex logNormalCF(equivalentNormal_.computeLogCharacteristicFunction(x));
    const Complex deltaLog(logCF - logNormalCF);
    Complex value;
    if (std::abs(deltaLog) < 1.0e-5) value = std::exp(logNormalCF) * (deltaLog * (1.0 + deltaLog * (0.5 + deltaLog / 6.0)));
    else value = std::exp(logCF) - std::exp(logNormalCF);
    LOGDEBUG(OSS() << "ih=" << x << ", logCF=" << logCF << ", CF=" << std::exp(logCF) << ", logNormalCF=" << logNormalCF << ", NormalCF=" << std::exp(logNormalCF) << ", value=" << value);
    return value;
  }
  // Here, the index has not been computed so far, fill-in the gap
  if (index > storedSize_)
  {
    for (UnsignedInteger i = storedSize_ + 1; i <= index; ++i)
    {
      const Scalar x = i * referenceBandwidth_[0];
      const Complex logCF(computeLogCharacteristicFunction(x));
      const Complex logNormalCF(equivalentNormal_.computeLogCharacteristicFunction(x));
      const Complex deltaLog(logCF - logNormalCF);
      Complex value;
      if (std::abs(deltaLog) < 1.0e-5) value = std::exp(logNormalCF) * (deltaLog * (1.0 + deltaLog * (0.5 + deltaLog / 6.0)));
      else value = std::exp(logCF) - std::exp(logNormalCF);
      LOGDEBUG(OSS() << "ih=" << x << ", logCF=" << logCF << ", CF=" << std::exp(logCF) << ", logNormalCF=" << logNormalCF << ", NormalCF=" << std::exp(logNormalCF) << ", value=" << value);
      characteristicValuesCache_.add(value);
    }
    storedSize_ = index;
    return characteristicValuesCache_[storedSize_ - 1];
  }
  // Should never go there
  throw InvalidArgumentException(HERE) << "Error: trying to access a cached characteristic value in an incorrect pattern.";
}

/* Compute the characteristic function of nD distributions by difference to a reference Normal distribution with the same mean and the same covariance */
Complex RandomMixture::computeDeltaCharacteristicFunction(const Point & x) const
{
  // Direct application on a point ==> useful for computation on grid
  const Complex logCF(computeLogCharacteristicFunction(x));
  const Complex logNormalCF(equivalentNormal_.computeLogCharacteristicFunction(x));
  const Complex deltaLog(logCF - logNormalCF);
  if (std::abs(deltaLog) < 1.0e-5) return std::exp(logNormalCF) * (deltaLog * (1.0 + deltaLog * (0.5 + deltaLog / 6.0)));
  else return std::exp(logCF) - std::exp(logNormalCF);
}
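
/* Illustrative sketch (not part of the library): why the third-order expansion above
   matters. For nearly equal log-CFs, exp(logCF) - exp(logNormalCF) cancels almost all
   significant digits, while exp(logNormalCF) * (exp(deltaLog) - 1) with
   exp(d) - 1 ~ d * (1 + d * (1/2 + d/6)) keeps full accuracy. Compile separately.

   #include <complex>
   #include <cstdio>

   int main()
   {
     const std::complex<double> logNormalCF(-2.0, 0.5);
     const std::complex<double> deltaLog(1.0e-9, -3.0e-10);   // |deltaLog| far below the 1e-5 switch
     const std::complex<double> naive = std::exp(logNormalCF + deltaLog) - std::exp(logNormalCF);
     const std::complex<double> safe = std::exp(logNormalCF)
                                       * (deltaLog * (1.0 + deltaLog * (0.5 + deltaLog / 6.0)));
     std::printf("naive: %.17e  safe: %.17e\n", naive.real(), safe.real());
     return 0;
   }
*/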

/* Update cache */
void RandomMixture::updateCacheDeltaCharacteristicFunction(const Sample & points) const
{
  for(UnsignedInteger i = 0; i < points.getSize(); ++i)
  {
    const Point x(points[i]);
    // Computation of CF - NormalCF
    // Here we check if it is possible to reduce the computation:
    // CF - NormalCF rewrites as NormalCF * (CF/NormalCF - 1), i.e.
    // exp(logNormalCF) * (exp(deltaLog) - 1), with deltaLog = logCF - logNormalCF
    // We use a 3rd order Taylor expansion of exp(deltaLog) - 1 if |deltaLog| <= 1e-5
    const Complex logCF(computeLogCharacteristicFunction(x));
    const Complex logNormalCF(equivalentNormal_.computeLogCharacteristicFunction(x));
    const Complex deltaLog(logCF - logNormalCF);
    Complex value;
    if (std::abs(deltaLog) < 1.0e-5) value = std::exp(logNormalCF) * (deltaLog * (1.0 + deltaLog * (0.5 + deltaLog / 6.0)));
    else value = std::exp(logCF) - std::exp(logNormalCF);
    LOGDEBUG(OSS() << "ih=" << x << ", logCF=" << logCF << ", CF=" << std::exp(logCF) << ", logNormalCF=" << logNormalCF << ", NormalCF=" << std::exp(logNormalCF) << ", value=" << value);
    characteristicValuesCache_.add(value);
    ++storedSize_;
  }
}

/* Get the PDF gradient of the distribution */
Point RandomMixture::computePDFGradient(const Point & point) const
{
  if (isAnalytical_ && (dimension_ == 1))
  {
    const Scalar alpha = weights_(0, 0);
    const Scalar factor = (isDiscrete() ? 1.0 : 1.0 / std::abs(alpha));
    return distributionCollection_[0].computePDFGradient((point - constant_) / alpha) * factor;
  } // isAnalytical_
  return DistributionImplementation::computePDFGradient(point);
}

/* Get the CDF gradient of the distribution */
Point RandomMixture::computeCDFGradient(const Point & point) const
{
  if (isAnalytical_ && (dimension_ == 1))
  {
    const Scalar alpha = weights_(0, 0);
    if (alpha > 0.0) return distributionCollection_[0].computeCDFGradient((point - constant_) / alpha);
    // If alpha < 0.0, the CDF of Y is the complementary CDF of X, hence the sign flip
    return distributionCollection_[0].computeCDFGradient((point - constant_) / alpha) * (-1.0);
  } // isAnalytical_
  return DistributionImplementation::computeCDFGradient(point);
}

/* Compute the mean of the RandomMixture */
void RandomMixture::computeMean() const
{
  mean_ = constant_;
  const UnsignedInteger size = distributionCollection_.getSize();
  Point mu(size);
  for(UnsignedInteger i = 0; i < size; ++i)
    mu[i] = distributionCollection_[i].getMean()[0];
  mean_ += weights_ * mu;
  isAlreadyComputedMean_ = true;
}

/* Compute the covariance of the RandomMixture */
void RandomMixture::computeCovariance() const
{
  // Compute the covariance of the mixture.
  // This method is private. Use getCovariance() to get the covariance value.
  // The covariance is given by
  //   Cov(Y) = weight * Cov(X) * weight^t
  // As Cov(X) is diagonal:
  //   Cov(Y)_{i,j} = \sum_{k=1}^n weights_{i,k} weights_{j,k} Cov(X_k, X_k)
  const UnsignedInteger dimension = getDimension();
  covariance_ = CovarianceMatrix(dimension);
  const UnsignedInteger size = distributionCollection_.getSize();
  for (UnsignedInteger i = 0; i < dimension; ++i)
  {
    for (UnsignedInteger j = 0; j <= i; ++j)
    {
      Scalar covariance = 0.0;
      for (UnsignedInteger k = 0; k < size; ++k)
      {
        covariance += weights_(i, k) * weights_(j, k) * distributionCollection_[k].getCovariance().operator()(0, 0);
      }
      covariance_(i, j) = covariance;
    }
  }
  isAlreadyComputedCovariance_ = true;
}
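
/* Worked example (illustrative): with dimension d = 2, atoms of variances v = (1, 4)
   and weight matrix
     W = | 1  2 |
         | 0  3 |
   the formula Cov(Y)_{i,j} = \sum_k W_{i,k} W_{j,k} v_k gives
     Cov(Y) = | 1*1*1 + 2*2*4   1*0*1 + 2*3*4 |   =   | 17  24 |
              | 0*1*1 + 3*2*4   0*0*1 + 3*3*4 |       | 24  36 |
*/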

/* Get the standard deviation of the RandomMixture */
Point RandomMixture::getStandardDeviation() const
{
  // It looks like the default implementation but
  // it is not in the multivariate case. Even if
  // it looks wasteful to compute the whole covariance
  // matrix only for its diagonal, it is not so wasteful
  // as random mixtures are limited to dimension <= 3,
  // and it is much more efficient than using getCenteredMoment()
  // because the covariance is a simple algebra based on
  // coefficients and atoms covariance.
  const UnsignedInteger dimension = getDimension();
  Point sigma(dimension, 0.0);
  for (UnsignedInteger i = 0; i < dimension; ++i)
    sigma[i] = std::sqrt(getCovariance().operator()(i, i));
  return sigma;
}

/* Get the skewness of the RandomMixture */
Point RandomMixture::getSkewness() const
{
  Point skewness(getDimension(), 0.0);
  const UnsignedInteger size = distributionCollection_.getSize();
  for (UnsignedInteger j = 0; j < getDimension(); ++j)
  {
    Scalar variance = 0.0;
    for(UnsignedInteger i = 0; i < size; ++i)
    {
      const Scalar wi = weights_(j, i);
      const Scalar wi2 = wi * wi;
      const Scalar vi = distributionCollection_[i].getCovariance().operator()(0, 0);
      variance += wi2 * vi;
      skewness[j] += wi2 * wi * distributionCollection_[i].getSkewness()[0] * std::pow(vi, 1.5);
    } /* end for */
    skewness[j] *= std::pow(variance, -1.5);
  }
  return skewness;
}

/* Get the kurtosis of the RandomMixture */
Point RandomMixture::getKurtosis() const
{
  Point kurtosis(getDimension(), 0.0);
  const UnsignedInteger size = distributionCollection_.getSize();
  Point v(size);
  Point w2(size);
  for (UnsignedInteger d = 0; d < getDimension(); ++d)
  {
    Scalar variance = 0.0;
    for(UnsignedInteger i = 0; i < size; ++i)
    {
      const Scalar wi = weights_(d, i);
      const Scalar wi2 = wi * wi;
      w2[i] = wi2;
      const Scalar vi = distributionCollection_[i].getCovariance().operator()(0, 0);
      v[i] = vi;
      variance += wi2 * vi;
      kurtosis[d] += wi2 * wi2 * distributionCollection_[i].getKurtosis()[0] * vi * vi;
      for (UnsignedInteger j = 0; j < i; ++j) kurtosis[d] += 6.0 * wi2 * w2[j] * vi * v[j];
    } /* end for */
    kurtosis[d] /= variance * variance;
  }
  return kurtosis;
}
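
/* The loop above implements the classical moment identity for a weighted sum of
   independent atoms: with atom variances v_i and (non-excess) kurtosis kappa_i,
     mu_4(Y_d) = \sum_i w_i^4 kappa_i v_i^2 + 6 \sum_{i < j} w_i^2 w_j^2 v_i v_j
   and the returned value is mu_4(Y_d) / Var(Y_d)^2 with Var(Y_d) = \sum_i w_i^2 v_i. */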

/* Parameters value and description accessor */
RandomMixture::PointWithDescriptionCollection RandomMixture::getParametersCollection() const
{
  // Assume that the weights are not part of the parameters
  const UnsignedInteger size = distributionCollection_.getSize();
  PointWithDescriptionCollection parameters(1);
  Description parametersDescription;
  // Form a big Point from the parameters of each atom
  for (UnsignedInteger i = 0; i < size; ++i)
  {
    const String prefix(distributionCollection_[i].getName());
    const PointWithDescription atomParameters(distributionCollection_[i].getParametersCollection()[0]);
    const Description atomDescription(atomParameters.getDescription());
    const UnsignedInteger atomParameterDimension = atomParameters.getDimension();
    // Add the current atom parameters
    for (UnsignedInteger j = 0; j < atomParameterDimension; ++j)
    {
      parameters[0].add(atomParameters[j]);
      parametersDescription.add(OSS() << prefix << "_" << atomDescription[j]);
    }
  }
  parameters[0].setDescription(parametersDescription);
  parameters[0].setName(getName());
  return parameters;
} // getParametersCollection

/* Parameters value accessor */
Point RandomMixture::getParameter() const
{
  // Assume that the weights are not part of the parameters
  const UnsignedInteger size = distributionCollection_.getSize();
  Point parameter;
  // Form a big Point from the parameters of each atom
  for (UnsignedInteger i = 0; i < size; ++i)
    parameter.add(distributionCollection_[i].getParameter());
  return parameter;
} // getParameter

void RandomMixture::setParameter(const Point & parameter)
{
  if (parameter.getSize() != getParameter().getSize()) throw InvalidArgumentException(HERE) << "Error: expected " << getParameter().getSize() << " values, got " << parameter.getSize();
  const UnsignedInteger size = distributionCollection_.getSize();
  UnsignedInteger shift = 0;
  for (UnsignedInteger i = 0; i < size; ++i)
  {
    Point localParameter(distributionCollection_[i].getParameter());
    std::copy(parameter.begin() + shift, parameter.begin() + shift + localParameter.getSize(), localParameter.begin());
    shift += localParameter.getSize();
    distributionCollection_[i].setParameter(localParameter);
  }
  setDistributionCollectionAndWeights(distributionCollection_, weights_, false);
} // setParameter
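
/* Usage sketch (illustrative, assuming the user-level OpenTURNS API): the flat
   parameter vector concatenates the native parameters of the atoms in collection
   order, the weights being excluded.

   DistributionCollection atoms;
   atoms.add(Normal(0.0, 1.0));
   atoms.add(Uniform(-1.0, 1.0));
   RandomMixture mixture(atoms);
   Point theta(mixture.getParameter());   // [mu, sigma, a, b]
   theta[0] = 2.0;                        // shift the normal atom
   mixture.setParameter(theta);           // atoms and derived quantities are rebuilt
*/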

/* Parameters description accessor */
Description RandomMixture::getParameterDescription() const
{
  // Assume that the weights are not part of the parameters
  const UnsignedInteger size = distributionCollection_.getSize();
  Description parameterDescription;
  // Form a big Description from the parameter descriptions of each atom
  for (UnsignedInteger i = 0; i < size; ++i)
  {
    const String prefix(distributionCollection_[i].getName());
    const Description atomParameterDescription(distributionCollection_[i].getParameterDescription());
    const UnsignedInteger atomParameterSize = atomParameterDescription.getSize();
    // Add the current atom parameters
    for (UnsignedInteger j = 0; j < atomParameterSize; ++j)
      parameterDescription.add(OSS() << prefix << "_" << atomParameterDescription[j]);
  }
  return parameterDescription;
} // getParameterDescription

/* Get a position indicator for a 1D distribution */
Scalar RandomMixture::getPositionIndicator() const
{
  if (!isAlreadyComputedPositionIndicator_) computePositionIndicator();
  return positionIndicator_;
}

/* Compute a position indicator for a 1D distribution */
void RandomMixture::computePositionIndicator() const
{
  if (getDimension() == 1)
  {
    positionIndicator_ = constant_[0];
    const UnsignedInteger size = distributionCollection_.getSize();
    // Assume an additive behaviour of the position indicator. It is true for the mean value, and almost true for the median of moderately skewed distributions
    for(UnsignedInteger i = 0; i < size; ++i)
    {
      const Scalar wi = weights_(0, i);
      const Scalar mi = distributionCollection_[i].getPositionIndicator();
      positionIndicator_ += wi * mi;
    }
    isAlreadyComputedPositionIndicator_ = true;
  }
}


/* Get a dispersion indicator for a 1D distribution */
Scalar RandomMixture::getDispersionIndicator() const
{
  if (!isAlreadyComputedDispersionIndicator_) computeDispersionIndicator();
  return dispersionIndicator_;
}

/* Compute a dispersion indicator for a 1D distribution */
void RandomMixture::computeDispersionIndicator() const
{
  if (getDimension() == 1)
  {
    dispersionIndicator_ = 0.0;
    const UnsignedInteger size = distributionCollection_.getSize();
    // Assume a quadratic additive behaviour of the dispersion indicator. It is true for the standard deviation value, and almost true for the interquartile range of moderately skewed distributions
    for(UnsignedInteger i = 0; i < size; ++i)
    {
      const Scalar wi = weights_(0, i);
      const Scalar si = distributionCollection_[i].getDispersionIndicator();
      dispersionIndicator_ += std::pow(wi * si, 2.0);
    }
    dispersionIndicator_ = std::sqrt(dispersionIndicator_);
    isAlreadyComputedDispersionIndicator_ = true;
  }
}

/* BlockMin accessor */
void RandomMixture::setBlockMin(const UnsignedInteger blockMin)
{
  blockMin_ = blockMin;
}

UnsignedInteger RandomMixture::getBlockMin() const
{
  return blockMin_;
}

/* BlockMax accessor */
void RandomMixture::setBlockMax(const UnsignedInteger blockMax)
{
  blockMax_ = blockMax;
}

UnsignedInteger RandomMixture::getBlockMax() const
{
  return blockMax_;
}

/* MaxSize accessor */
void RandomMixture::setMaxSize(const UnsignedInteger maxSize)
{
  maxSize_ = maxSize;
  // The cache must grow progressively, so:
  // + if maxSize >= storedSize, we keep the current cache as it is
  // + if maxSize < storedSize, we reduce the cache and update the storedSize
  if (maxSize_ < storedSize_)
  {
    characteristicValuesCache_.resize(maxSize);
    storedSize_ = maxSize;
  }
}

UnsignedInteger RandomMixture::getMaxSize() const
{
  return maxSize_;
}

/* Alpha accessor */
void RandomMixture::setAlpha(const Scalar alpha)
{
  if (!(alpha > 0.0)) throw InvalidArgumentException(HERE) << "Error: the alpha parameter must be strictly positive";
  alpha_ = alpha;
  computeRange();
  computeReferenceBandwidth();
}

Scalar RandomMixture::getAlpha() const
{
  return alpha_;
}

/* Beta accessor */
void RandomMixture::setBeta(const Scalar beta)
{
  beta_ = beta;
  computeRange();
  computeReferenceBandwidth();
}

Scalar RandomMixture::getBeta() const
{
  return beta_;
}

/* Reference bandwidth accessor */
void RandomMixture::setReferenceBandwidth(const Point & bandwidth)
{
  referenceBandwidth_ = bandwidth;
  // Reset the cached values
  storedSize_ = 0;
  characteristicValuesCache_ = ComplexPersistentCollection(0);
}

Point RandomMixture::getReferenceBandwidth() const
{
  return referenceBandwidth_;
}

/* PDF precision accessor. For other distributions, it is a read-only attribute. */
void RandomMixture::setPDFPrecision(const Scalar pdfPrecision)
{
  pdfPrecision_ = pdfPrecision;
}

/* CDF precision accessor. For other distributions, it is a read-only attribute. */
void RandomMixture::setCDFPrecision(const Scalar cdfPrecision)
{
  cdfPrecision_ = cdfPrecision;
}

/* Compute the reference bandwidth. It is defined as the largest bandwidth
   that allows a precise computation of the PDF over the range
   [positionIndicator_ +/- beta * dispersionIndicator_] */
void RandomMixture::computeReferenceBandwidth()
{
  referenceBandwidth_ = Point(getDimension(), 0.0);
  Bool isFinite = true;
  const Point a(getRange().getLowerBound());
  const Point b(getRange().getUpperBound());
  for (UnsignedInteger k = 0; k < getDimension(); ++k)
  {
    referenceBandwidth_[k] = 2.0 * M_PI / (b[k] - a[k]);
    isFinite &= (getRange().getFiniteLowerBound()[k] && getRange().getFiniteUpperBound()[k]);
  }
  // Shrink the bandwidth a little if the range is finite
  if (isFinite) referenceBandwidth_ *= 0.5;

  // Compute the reference bandwidth factor
  referenceBandwidthFactor_ = 1.0;
  for (UnsignedInteger k = 0; k < getDimension(); ++k) referenceBandwidthFactor_ *= (referenceBandwidth_[k] / (2.0 * M_PI));

  // Compute grid helper object
  gridMesher_ = SphereUniformNorm::GetFromGridSteps(referenceBandwidth_, true);

  // Reset the cached values
  storedSize_ = 0;
  characteristicValuesCache_ = ComplexPersistentCollection(0);
}
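
/* Worked example (illustrative): for a 1D mixture whose range is the finite interval
   [a, b] = [-1, 3], the rule above first gives h = 2*pi / (b - a) = pi / 2, then halves
   it to h = pi / 4 so that the periodization period 2*pi / h = 8 is twice the range
   length and wrap-around of the periodized density is avoided. */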

/* Compute the equivalent normal distribution, i.e. with the same mean and
   the same standard deviation */
void RandomMixture::computeEquivalentNormal()
{
  if (distributionCollection_.getSize() > 0)
  {
    // If dimension > 1 use the first and second moments
    if (dimension_ > 1) equivalentNormal_ = Normal(getMean(), getCovariance());
    // Otherwise use more general parameters
    else equivalentNormal_ = Normal(getPositionIndicator(), getDispersionIndicator());
  }
  else equivalentNormal_ = Normal();
}

/* Compute the left-hand sum in Poisson's summation formula for the equivalent normal */
Scalar RandomMixture::computeEquivalentNormalPDFSum(const Scalar x) const
{
  if (dimension_ != 1) throw InvalidDimensionException(HERE) << "RandomMixture::computeEquivalentNormalPDFSum(Scalar) is only possible for dimension 1";

  Scalar value = equivalentNormal_.computePDF(x);
  UnsignedInteger i = 0;
  Scalar delta = 0.0;
  do
  {
    ++i;
    const Scalar step = 2.0 * M_PI * i / referenceBandwidth_[0];
    delta = equivalentNormal_.computePDF(x + step) + equivalentNormal_.computePDF(x - step);
    value += delta;
  }
  while (delta > 0.0 * value);
  return value;
}

Scalar RandomMixture::computeEquivalentNormalPDFSum(const Point & y, const Point & gridStep,
    UnsignedInteger imax, UnsignedInteger & levelMax) const
{
  /*
    Compute the left-hand sum in Poisson's summation formula for the equivalent normal.
    The goal is to compute:
    \sum_{i \in \mathbb{Z}^d} q(y + i * h) with:
    y = (y_1,...,y_d) the point at which the pdf is requested
    q = the density function of the distribution computed by computeEquivalentNormal
    h = (h_1,...,h_d) the reference bandwidth
    i*h = (i_1 * h_1,...,i_d * h_d)
    The sum above is rewritten as:
    \sum_{s \in \mathbb{N}} \sum_{x such that \norm{x-y}_\infty = s} q(x)
    We start with s=0 and at each iteration we add the points which are exactly at
    distance s in L^\infty norm.
    If s>0, there are (2s+1)^d - (2s-1)^d points to add at iteration s.
    The evaluations of the Gaussian density at these points are added to the current sum.
    The summation halts when the value added at iteration s is negligible relative to
    the current density value.
  */
  if (gridStep.getDimension() != getDimension()) throw InvalidArgumentException(HERE) << "Error: invalid grid dimension";
  if (dimension_ == 1)
  {
    const Scalar x = y[0];
    Scalar value = equivalentNormal_.computePDF(x);
    UnsignedInteger i = 0;
    Scalar delta = 0.0;
    do
    {
      ++i;
      const Scalar step = i * gridStep[0];
      delta = equivalentNormal_.computePDF(x + step) + equivalentNormal_.computePDF(x - step);
      value += delta;
    }
    while (delta > 0.0 * value);
    return value;
  }

  // We cannot use gridMesher_; we need another instance, which does not use caching.
  // We force symmetry to improve performance.
  const SphereUniformNorm grid(SphereUniformNorm::GetFromGridSteps(gridStep, true));

  Scalar gaussianPDF = equivalentNormal_.computePDF(y);
  Scalar delta = std::max(1.0, gaussianPDF);
  const Scalar epsilon = pdfPrecision_;

  // If imax is zero, we want to store in levelMax the first level which does not improve accuracy.
  // If non zero, this means that a previous call had already computed levelMax, and levelMax
  // must not change.
  levelMax = imax;
  Point skin1(dimension_);
  Point skin2(dimension_);
  for (UnsignedInteger i = 1; (imax == 0 || i < imax) && (delta > gaussianPDF * epsilon); ++i)
  {
    const Sample skinPoints(grid.getPoints(i));

    if (!imax) levelMax = i;
    const UnsignedInteger numberOfPoints = skinPoints.getSize();
    delta = 0.0;
    for (UnsignedInteger j = 0; j < numberOfPoints; ++j)
    {
      for (UnsignedInteger d = 0; d < dimension_; ++d)
      {
        skin1[d] = y[d] + skinPoints(j, d);
        skin2[d] = y[d] - skinPoints(j, d);
      }
      delta += equivalentNormal_.computePDF(skin1) + equivalentNormal_.computePDF(skin2);
    }
    gaussianPDF += delta;
  }
  return gaussianPDF;
}
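
/* Illustrative sketch (not part of the library): the shell-by-shell summation above
   in dimension 2, written without the SphereUniformNorm helper. Grid points at
   L-infinity distance s from the center form the boundary of a (2s+1) x (2s+1)
   square. Names are local (the density is an independent standard normal pair);
   compile separately.

   #include <cmath>
   #include <cstdio>
   #include <cstdlib>

   double pdf2D(double x, double y)
   {
     return std::exp(-0.5 * (x * x + y * y)) / (2.0 * M_PI);
   }

   int main()
   {
     const double h = 0.8;                // grid step, playing the role of the bandwidth
     const double y0 = 0.3, y1 = -0.2;    // point where the periodized density is wanted
     double value = pdf2D(y0, y1);        // shell s = 0
     for (int s = 1; s < 64; ++s)
     {
       double delta = 0.0;
       for (int i = -s; i <= s; ++i)
         for (int j = -s; j <= s; ++j)
           if (std::abs(i) == s || std::abs(j) == s)   // keep only the shell boundary
             delta += pdf2D(y0 + i * h, y1 + j * h);
       value += delta;
       if (delta <= 1.0e-16 * value) break;            // negligible shell: stop
     }
     std::printf("periodized pdf ~ %.12f\n", value);
     return 0;
   }
*/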

/* Compute the left-hand sum in Poisson's summation formula for the equivalent normal */
Scalar RandomMixture::computeEquivalentNormalCDFSum(const Scalar s,
    const Scalar t) const
{
  if (dimension_ != 1) throw InvalidDimensionException(HERE) << "RandomMixture::computeEquivalentNormalCDFSum(Scalar) is only possible for dimension 1";

  Scalar value = equivalentNormal_.computeProbability(Interval(s, t));
  UnsignedInteger i = 0;
  Scalar delta = 0.0;
  do
  {
    ++i;
    const Scalar step = 2.0 * M_PI * i / referenceBandwidth_[0];
    delta = (equivalentNormal_.computeCDF(t + step) - equivalentNormal_.computeCDF(s + step)) + (equivalentNormal_.computeCDF(t - step) - equivalentNormal_.computeCDF(s - step));
    value += delta;
  }
  while (delta > 0.0 * value);
  return value;
}

struct RandomMixturePair
{
  Scalar norm_;
  Distribution distribution_;
  RandomMixturePair(): norm_(0.0), distribution_() {}
  RandomMixturePair(const Scalar norm, const Distribution & distribution): norm_(norm), distribution_(distribution) {}

  Bool operator < (const RandomMixturePair & other) const
  {
    return norm_ < other.norm_;
  }
};

typedef Collection<RandomMixturePair> RandomMixturePairCollection;

/** Project a RandomMixture over a Collection of DistributionFactory by using a regular sampling and the Kolmogorov distance. */
DistributionCollection RandomMixture::project(const DistributionFactoryCollection & factoryCollection,
    Point & kolmogorovNorm,
    const UnsignedInteger size) const
{
  if (getDimension() != 1) throw NotDefinedException(HERE) << "Error: cannot project random mixtures of dimension>1.";
  const UnsignedInteger factorySize = factoryCollection.getSize();
  RandomMixturePairCollection result(0);
  const Scalar mean = getMean()[0];
  const Scalar sigma = getStandardDeviation()[0];
  // Sample the quantile function uniformly over [mean +/- alpha * sigma]
  const Scalar qMin = computeCDF(mean - alpha_ * sigma);
  const Scalar qMax = computeCDF(mean + alpha_ * sigma);
  const Sample dataX(computeQuantile(qMin, qMax, size).getMarginal(1));
  // Loop over the factories
  for (UnsignedInteger i = 0; i < factorySize; ++i)
  {
    DistributionFactory factory(factoryCollection[i]);
    Distribution candidate;
    try
    {
      candidate = factory.build(dataX);
      LOGINFO(OSS() << "candidate " << i << " for the projection=" << candidate);
    }
    catch(...)
    {
      LOGWARN(OSS() << "Estimation failed for the factory " << factory.getImplementation()->getClassName() << ". It is removed from the set of factories.");
      continue;
    }
    Scalar kolmogorov = 0.0;
    for (UnsignedInteger j = 0; j < size; ++j)
      kolmogorov = std::max(kolmogorov, std::abs(candidate.computeCDF(dataX(j, 0)) - (qMin + j * (qMax - qMin) / (size - 1.0))));
    result.add(RandomMixturePair(kolmogorov, candidate));
  }
  // Sort the results by increasing Kolmogorov distance
  const UnsignedInteger resultSize = result.getSize();
  std::stable_sort(result.begin(), result.end());
  // Extract the results
  DistributionCollection distributionCollection(resultSize);
  kolmogorovNorm = Point(resultSize);
  for (UnsignedInteger i = 0; i < resultSize; ++i)
  {
    distributionCollection[i] = result[i].distribution_;
    kolmogorovNorm[i] = result[i].norm_;
  }
  return distributionCollection;
}
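
/* Usage sketch (illustrative, assuming the user-level OpenTURNS API):

   DistributionCollection atoms;
   atoms.add(Uniform(-1.0, 1.0));
   atoms.add(Uniform(-1.0, 1.0));
   const RandomMixture mixture(atoms);          // Y = X1 + X2, triangular on [-2, 2]
   Collection<DistributionFactory> factories;
   factories.add(NormalFactory());
   factories.add(TriangularFactory());
   Point kolmogorovNorm;
   const DistributionCollection candidates(mixture.project(factories, kolmogorovNorm, 100));
   // candidates is sorted by increasing Kolmogorov distance, stored in kolmogorovNorm
*/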

/* Get the i-th marginal distribution */
Distribution RandomMixture::getMarginal(const UnsignedInteger i) const
{
  const UnsignedInteger dimension = getDimension();
  if (i >= dimension) throw InvalidArgumentException(HERE) << "The index of a marginal distribution must be in the range [0, dim-1]";
  if (dimension == 1) return clone();
  RandomMixture::Implementation marginal(new RandomMixture(distributionCollection_, weights_.getRow(i), Point(1, constant_[i])));
  marginal->setDescription(Description(1, getDescription()[i]));
  return marginal;
}

/* Get the distribution of the marginal distribution corresponding to indices dimensions */
Distribution RandomMixture::getMarginal(const Indices & indices) const
{
  const UnsignedInteger dimension = getDimension();
  if (!indices.check(dimension)) throw InvalidArgumentException(HERE) << "The indices of a marginal distribution must be in the range [0, dim-1] and must be different";
  if (dimension == 1) return clone();
  const UnsignedInteger outputDimension = indices.getSize();
  const UnsignedInteger size = distributionCollection_.getSize();
  Matrix marginalWeights(outputDimension, size);
  Point marginalConstant(outputDimension);
  Description description(getDescription());
  Description marginalDescription(outputDimension);
  for (UnsignedInteger i = 0; i < outputDimension; ++i)
  {
    const UnsignedInteger index_i = indices[i];
    marginalConstant[i] = constant_[index_i];
    const Matrix row(weights_.getRow(index_i));
    // Copy the whole row of weights: there is one column per atom, not per output dimension
    for (UnsignedInteger j = 0; j < size; ++j) marginalWeights(i, j) = row(0, j);
    marginalDescription[i] = description[index_i];
  }
  RandomMixture::Implementation marginal(new RandomMixture(distributionCollection_, marginalWeights, marginalConstant));
  marginal->setDescription(marginalDescription);
  return marginal;
} // getMarginal(Indices)

/* Tell if the distribution has independent copula */
Bool RandomMixture::hasIndependentCopula() const
{
  return (getDimension() == 1);
}

/* Tell if the distribution has elliptical copula */
Bool RandomMixture::hasEllipticalCopula() const
{
  return (getDimension() == 1);
}

/* Check if the distribution is elliptical */
Bool RandomMixture::isElliptical() const
{
  const UnsignedInteger size = distributionCollection_.getSize();
  // Case of a Dirac distribution
  if (size == 0) return true;
  for (UnsignedInteger i = 0; i < size; ++i)
    if (!distributionCollection_[i].isElliptical()) return false;
  return true;
}

/* Check if the distribution is continuous */
Bool RandomMixture::isContinuous() const
{
  const UnsignedInteger size = distributionCollection_.getSize();
  for (UnsignedInteger i = 0; i < size; ++i)
    if (distributionCollection_[i].isContinuous()) return true;
  return false;
}

/* Check if the distribution is discrete */
Bool RandomMixture::isDiscrete() const
{
  const UnsignedInteger size = distributionCollection_.getSize();
  for (UnsignedInteger i = 0; i < size; ++i)
    if (!distributionCollection_[i].isDiscrete()) return false;
  return true;
}

/* Tell if the distribution is integer valued */
Bool RandomMixture::isIntegral() const
{
  const UnsignedInteger size = distributionCollection_.getSize();
  const UnsignedInteger dimension = getDimension();
  for (UnsignedInteger i = 0; i < size; ++i)
  {
    // Check if the contributor is discrete
    if (!distributionCollection_[i].isDiscrete()) return false;
    // Check if all the weights are integer
    for (UnsignedInteger j = 0; j < dimension; ++j)
      if (weights_(j, i) != round(weights_(j, i))) return false;
  }
  return true;
}

/* Get the support of a discrete distribution that intersects a given interval */
Sample RandomMixture::getSupport(const Interval & interval) const
{
  if (interval.getDimension() != getDimension()) throw InvalidArgumentException(HERE) << "Error: the given interval has a dimension that does not match the distribution dimension.";
  if (!isDiscrete()) throw NotDefinedException(HERE) << "Error: the support is defined only for discrete distributions.";
  const UnsignedInteger size = distributionCollection_.getSize();
  const UnsignedInteger dimension = getDimension();
  Sample support(0, dimension);
  // Start from the scaled support of the first atom, shifted by the constant
  Sample supportCandidates;
  if (dimension == 1)
    supportCandidates = distributionCollection_[0].getSupport() * Point(*weights_.getColumn(0).getImplementation()) + constant_;
  else
  {
    const Sample support0 = distributionCollection_[0].getSupport();
    const Point scaling(*weights_.getColumn(0).getImplementation());
    supportCandidates = Sample(support0.getSize(), dimension);
    for (UnsignedInteger i = 0; i < support0.getSize(); ++i)
      supportCandidates[i] = scaling * support0(i, 0) + constant_;
  } // dimension > 1
  for (UnsignedInteger indexNext = 1; indexNext < size; ++indexNext)
  {
    // The constant has already been taken into account with the first atom
    Sample nextSupport;
    if (dimension == 1)
      nextSupport = distributionCollection_[indexNext].getSupport() * Point(*weights_.getColumn(indexNext).getImplementation());
    else
    {
      const Sample supportNext = distributionCollection_[indexNext].getSupport();
      const Point scaling(*weights_.getColumn(indexNext).getImplementation());
      nextSupport = Sample(supportNext.getSize(), dimension);
      for (UnsignedInteger i = 0; i < supportNext.getSize(); ++i)
        nextSupport[i] = scaling * supportNext(i, 0);
    } // dimension > 1
    const UnsignedInteger supportCandidatesSize = supportCandidates.getSize();
    const UnsignedInteger nextSupportSize = nextSupport.getSize();
    Sample newSupportCandidate(supportCandidatesSize * nextSupportSize, dimension);
    UnsignedInteger k = 0;
    for (UnsignedInteger indexCandidates = 0; indexCandidates < supportCandidatesSize; ++indexCandidates)
    {
      const Point xI(supportCandidates[indexCandidates]);
      for (UnsignedInteger indexNext2 = 0; indexNext2 < nextSupportSize; ++indexNext2)
      {
        const Point xJ(nextSupport[indexNext2]);
        newSupportCandidate[k] = xI + xJ;
        ++k;
      } // indexNext2
    } // indexCandidates
    // Remove duplicates
    supportCandidates = newSupportCandidate.sortUnique();
  } // loop over the other atoms
  for (UnsignedInteger i = 0; i < supportCandidates.getSize(); ++i)
  {
    const Point candidate(supportCandidates[i]);
    if (interval.contains(candidate)) support.add(candidate);
  }
  return support;
}
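
/* Worked example (illustrative): for Y = X1 + 2 * X2 with X1 ~ Bernoulli(p) and
   X2 ~ Bernoulli(q), the scaled atom supports are {0, 1} and {0, 2}; the pairwise
   sums give the candidates {0, 1, 2, 3}, deduplicated by sortUnique() and then
   filtered by the requested interval. */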

/* Method save() stores the object through the StorageManager */
void RandomMixture::save(Advocate & adv) const
{
  DistributionImplementation::save(adv);
  adv.saveAttribute( "distributionCollection_", distributionCollection_  );
  adv.saveAttribute( "constant_", constant_ );
  adv.saveAttribute( "weights_", weights_ );
  adv.saveAttribute( "positionIndicator_", positionIndicator_ );
  adv.saveAttribute( "isAlreadyComputedPositionIndicator_", isAlreadyComputedPositionIndicator_ );
  adv.saveAttribute( "dispersionIndicator_", dispersionIndicator_ );
  adv.saveAttribute( "isAlreadyComputedDispersionIndicator_", isAlreadyComputedDispersionIndicator_ );
  adv.saveAttribute( "blockMin_", blockMin_ );
  adv.saveAttribute( "blockMax_", blockMax_ );
  adv.saveAttribute( "referenceBandwidth_", referenceBandwidth_ );
  adv.saveAttribute( "referenceBandwidthFactor_", referenceBandwidthFactor_ );
  adv.saveAttribute( "maxSize_", maxSize_ );
  adv.saveAttribute( "storedSize_", storedSize_ );
  adv.saveAttribute( "characteristicValuesCache_", characteristicValuesCache_ );
  adv.saveAttribute( "alpha_", alpha_ );
  adv.saveAttribute( "beta_", beta_ );
  adv.saveAttribute( "pdfPrecision_", pdfPrecision_ );
  adv.saveAttribute( "cdfPrecision_", cdfPrecision_ );
  adv.saveAttribute( "inverseWeights_", inverseWeights_ );
  adv.saveAttribute( "detWeightsInverse_", detWeightsInverse_ );
  adv.saveAttribute( "fftAlgorithm_", fftAlgorithm_ );
  adv.saveAttribute( "isAnalytical_", isAnalytical_ );
} // save

/* Method load() reloads the object from the StorageManager */
void RandomMixture::load(Advocate & adv)
{
  DistributionImplementation::load(adv);
  adv.loadAttribute( "distributionCollection_", distributionCollection_  );
  adv.loadAttribute( "constant_", constant_ );
  adv.loadAttribute( "weights_", weights_ );
  adv.loadAttribute( "positionIndicator_", positionIndicator_ );
  adv.loadAttribute( "isAlreadyComputedPositionIndicator_", isAlreadyComputedPositionIndicator_ );
  adv.loadAttribute( "dispersionIndicator_", dispersionIndicator_ );
  adv.loadAttribute( "isAlreadyComputedDispersionIndicator_", isAlreadyComputedDispersionIndicator_ );
  adv.loadAttribute( "blockMin_", blockMin_ );
  adv.loadAttribute( "blockMax_", blockMax_ );
  adv.loadAttribute( "referenceBandwidth_", referenceBandwidth_ );
  adv.loadAttribute( "referenceBandwidthFactor_", referenceBandwidthFactor_ );
  adv.loadAttribute( "maxSize_", maxSize_ );
  adv.loadAttribute( "storedSize_", storedSize_ );
  adv.loadAttribute( "characteristicValuesCache_", characteristicValuesCache_ );
  adv.loadAttribute( "alpha_", alpha_ );
  adv.loadAttribute( "beta_", beta_ );
  adv.loadAttribute( "pdfPrecision_", pdfPrecision_ );
  adv.loadAttribute( "cdfPrecision_", cdfPrecision_ );
  adv.loadAttribute( "inverseWeights_", inverseWeights_ );
  adv.loadAttribute( "detWeightsInverse_", detWeightsInverse_ );
  adv.loadAttribute( "fftAlgorithm_", fftAlgorithm_ );
  adv.loadAttribute( "isAnalytical_", isAnalytical_ );
  computePositionIndicator();
  computeDispersionIndicator();
  computeRange();
  computeReferenceBandwidth();
  computeEquivalentNormal();
} // load

END_NAMESPACE_OPENTURNS