1 /*
2  * Copyright © 2004 Ondra Kamenik
3  * Copyright © 2019 Dynare Team
4  *
5  * This file is part of Dynare.
6  *
7  * Dynare is free software: you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation, either version 3 of the License, or
10  * (at your option) any later version.
11  *
12  * Dynare is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 // Tensor containers.
22 
23 /* One of primary purposes of the tensor library is to perform one step
24    of the Faà Di Bruno formula:
25 
                                             ˡ
   [B_sᵏ]_α₁…αₖ = [h_zˡ]_γ₁…γₗ     ∑      ∏   [g_{s^|cₘ|}]_{cₘ(α)}^{γₘ}
                                 c∈ℳₗ,ₖ  ᵐ⁼¹
29 
30    where [h_zˡ] and [g_sⁱ] are tensors, ℳₗ,ₖ is the set of all equivalences
31    with l classes of k element set, cₘ is an m-class of equivalence c, and |cₘ|
32    is its cardinality. Further, cₘ(α) is the sequence of α’s picked by
33    equivalence class cₘ.
34 
35    In order to accomplish this operation, we basically need some storage of all
36    tensors of the form [g_sⁱ]. Note that s can be compound, for instance
37    s=(y,u). Then we need storage for [g_y³], [g_y²u], [g_yu⁵], etc.
38 
39    We need an object holding all tensors of the same type. Here type means an
40    information, that coordinates of the tensors can be of type y or u. We will
41    group only tensors, whose symmetry is described by the Symmetry class. These
42    are only y²u³, not yuyu². So, we are going to define a class which will hold
43    tensors whose symmetries are of type Symmetry and have the same symmetry
44    length (number of different coordinate types). Also, for each symmetry there
45    will be at most one tensor.
46 
47    The class has two purposes. The first is to provide storage (insert and
48    retrieve). The second is to perform the above step of Faà Di Bruno. This is
   done by going through all equivalences with l classes, performing the
   tensor product, and adding to the result.
51 
52    We define a template class TensorContainer. From different instantiations of
53    the template class we will inherit to create concrete classes, for example
54    container of unfolded general symmetric tensors. The one step of the Faà Di
55    Bruno (we call it multAndAdd()) is implemented in the concrete subclasses,
   because the implementation depends on storage. Note that multAndAdd() does
   not even have a common template declaration: a sparse tensor h is
   multiplied by folded tensors g yielding a folded tensor B, whereas an
   unfolded tensor h is multiplied by unfolded tensors g yielding an unfolded
   tensor B. */
60 
61 #ifndef T_CONTAINER_H
62 #define T_CONTAINER_H
63 
#include "symmetry.hh"
#include "gs_tensor.hh"
#include "tl_exception.hh"
#include "tl_static.hh"
#include "sparse_tensor.hh"
#include "equivalence.hh"
#include "rfs_tensor.hh"
#include "Vector.hh"

#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <matio.h>
79 
80 // ltsym predicate
/* A predicate providing a strict weak ordering on symmetries, used as the
   comparator of the std::map below. */
83 struct ltsym
84 {
85   bool
operator ()ltsym86   operator()(const Symmetry &s1, const Symmetry &s2) const
87   {
88     return s1 < s2;
89   }
90 };
91 
92 /* Here we define the template class for tensor container. We implement it as
   an std::map. It is a unique container: no two tensors with the same
   symmetry can coexist. Keys of the map are symmetries, values are pointers to tensors.
95    The class is responsible for deallocating all tensors. Creation of the
96    tensors is done outside.
97 
98    The class has integer ‘n’ as its member. It is a number of different
99    coordinate types of all contained tensors. Besides intuitive insert and
100    retrieve interface, we define a method fetchTensors(), which for a given
101    symmetry and given equivalence calculates symmetries implied by the symmetry
102    and all equivalence classes, and fetches corresponding tensors in a vector.
103 
104    Also, each instance of the container has a reference to EquivalenceBundle
105    which allows an access to equivalences. */
106 
107 template<class _Ttype>
108 class TensorContainer
109 {
110 protected:
111   using _Map = std::map<Symmetry, std::unique_ptr<_Ttype>, ltsym>;
112 private:
113   int n;
114   _Map m;
115 public:
TensorContainer(int nn)116   TensorContainer(int nn)
117     : n(nn)
118   {
119   }
120   /* This is just a copy constructor. This makes a hard copy of all tensors. */
TensorContainer(const TensorContainer<_Ttype> & c)121   TensorContainer(const TensorContainer<_Ttype> &c)
122     : n(c.n)
123   {
124     for (const auto &it : c.m)
125       insert(std::make_unique<_Ttype>(*(it.second)));
126   }
127   TensorContainer(TensorContainer<_Ttype> &&) = default;
128 
129   // TensorContainer subtensor constructor
130   /* This constructor constructs a new tensor container, whose tensors
131      are in-place subtensors of the given container. */
TensorContainer(int first_row,int num,TensorContainer<_Ttype> & c)132   TensorContainer(int first_row, int num, TensorContainer<_Ttype> &c)
133     : n(c.n)
134   {
135     for (const auto &it : c.m)
136       insert(std::make_unique<_Ttype>(first_row, num, *(it.second)));
137   }
138 
139   TensorContainer<_Ttype> &
operator =(const TensorContainer<_Ttype> & c)140   operator=(const TensorContainer<_Ttype> &c)
141   {
142     n = c.n;
143     m.clear();
144     for (const auto &it : c.m)
145       insert(std::make_unique<_Ttype>(*(it.second)));
146   }
147   TensorContainer<_Ttype> &operator=(TensorContainer<_Ttype> &&) = default;
148 
149   const _Ttype &
get(const Symmetry & s) const150   get(const Symmetry &s) const
151   {
152     TL_RAISE_IF(s.num() != num(),
153                 "Incompatible symmetry lookup in TensorContainer::get");
154     auto it = m.find(s);
155     TL_RAISE_IF(it == m.end(), "Symmetry not found in TensorContainer::get");
156     return *(it->second);
157   }
158 
159   _Ttype &
get(const Symmetry & s)160   get(const Symmetry &s)
161   {
162     TL_RAISE_IF(s.num() != num(),
163                 "Incompatible symmetry lookup in TensorContainer::get");
164     auto it = m.find(s);
165     TL_RAISE_IF(it == m.end(), "Symmetry not found in TensorContainer::get");
166     return *(it->second);
167   }
168 
169   bool
check(const Symmetry & s) const170   check(const Symmetry &s) const
171   {
172     TL_RAISE_IF(s.num() != num(),
173                 "Incompatible symmetry lookup in TensorContainer::check");
174     auto it = m.find(s);
175     return it != m.end();
176   }
177 
178   virtual void
insert(std::unique_ptr<_Ttype> t)179   insert(std::unique_ptr<_Ttype> t)
180   {
181     TL_RAISE_IF(t->getSym().num() != num(),
182                 "Incompatible symmetry insertion in TensorContainer::insert");
183     TL_RAISE_IF(check(t->getSym()),
184                 "Tensor already in container in TensorContainer::insert");
185     if (!t->isFinite())
186       throw TLException(__FILE__, __LINE__, "NaN or Inf asserted in TensorContainer::insert");
187     m.emplace(t->getSym(), std::move(t));
188   }
189 
190   void
remove(const Symmetry & s)191   remove(const Symmetry &s)
192   {
193     m.erase(s);
194   }
195 
196   void
clear()197   clear()
198   {
199     m.clear();
200   }
201 
202   int
getMaxDim() const203   getMaxDim() const
204   {
205     int res = -1;
206     for (const auto &run : m)
207       {
208         int dim = run.first.dimen();
209         if (dim > res)
210           res = dim;
211       }
212     return res;
213   }
214 
215   /* Debug print. */
216   void
print() const217   print() const
218   {
219     std::cout << "Tensor container: nvars=" << n << ", tensors=" << m.size() << '\n';
220     for (auto &it : *this)
221       {
222         std::cout << "Symmetry: ";
223         it.first.print();
224         it.second->print();
225       }
226   }
227 
228   /* Output to the MAT file. */
229   void
writeMat(mat_t * fd,const std::string & prefix) const230   writeMat(mat_t *fd, const std::string &prefix) const
231   {
232     for (auto &it : *this)
233       {
234         std::string lname = prefix + "_g";
235         const Symmetry &sym = it.first;
236         for (int i = 0; i < sym.num(); i++)
237           lname += '_' + std::to_string(sym[i]);
238         ConstTwoDMatrix m(*(it.second));
239         m.writeMat(fd, lname);
240       }
241   }
242 
243   /* Output to the Memory Map. */
244   void
writeMMap(std::map<std::string,ConstTwoDMatrix> & mm,const std::string & prefix) const245   writeMMap(std::map<std::string, ConstTwoDMatrix> &mm, const std::string &prefix) const
246   {
247     for (auto &it : *this)
248       {
249         std::string lname = prefix + "_g";
250         const Symmetry &sym = it.first;
251         for (int i = 0; i < sym.num(); i++)
252           lname += '_' + std::to_string(sym[i]);
253         mm.emplace(lname, ConstTwoDMatrix(*(it.second)));
254       }
255   }
256 
257   /* Here we fetch all tensors given by symmetry and equivalence. We go
258      through all equivalence classes, calculate implied symmetry, and
259      fetch its tensor storing it in the same order to the vector. */
260 
261   std::vector<const _Ttype *>
fetchTensors(const Symmetry & rsym,const Equivalence & e) const262   fetchTensors(const Symmetry &rsym, const Equivalence &e) const
263   {
264     std::vector<const _Ttype *> res(e.numClasses());
265     int i = 0;
266     for (auto it = e.begin(); it != e.end(); ++it, i++)
267       {
268         Symmetry s(rsym, *it);
269         res[i] = &get(s);
270       }
271     return res;
272   }
273 
274   virtual ~TensorContainer() = default;
275 
276   int
num() const277   num() const
278   {
279     return n;
280   }
281 
282   auto
begin() const283   begin() const
284   {
285     return m.begin();
286   }
287   auto
end() const288   end() const
289   {
290     return m.end();
291   }
292   auto
begin()293   begin()
294   {
295     return m.begin();
296   }
297   auto
end()298   end()
299   {
300     return m.end();
301   }
302 };
303 
304 /* Here is a container storing UGSTensor’s. We declare multAndAdd() method. */
305 
class FGSContainer;
// Container of unfolded general symmetric tensors (UGSTensor).
class UGSContainer : public TensorContainer<UGSTensor>
{
public:
  // Constructs an empty container for tensors with ‘nn’ coordinate types.
  UGSContainer(int nn)
    : TensorContainer<UGSTensor>(nn)
  {
  }
  // Conversion from the folded container; defined out of line.
  UGSContainer(const FGSContainer &c);
  /* One step of the Faà Di Bruno formula: the unfolded tensor ‘t’ (h) is
     multiplied with tensors from this container (the g’s) and the result is
     added to the unfolded tensor ‘out’ (B). Defined out of line. */
  void multAndAdd(const UGSTensor &t, UGSTensor &out) const;
};
317 
318 /* Here is a container storing FGSTensor’s. We declare two versions of
319    multAndAdd() method. The first works for folded B and folded h tensors, the
   second works for folded B and unfolded h. There is no point in doing it for
   unfolded B, since the algorithm goes through all the indices of B and
   calculates the corresponding columns. So, if B is needed unfolded, it is
   more efficient to calculate its folded version and then unfold it by
   conversion.
324 
325    The static member ‘num_one_time’ is a number of columns formed from product
326    of g tensors at one time. This is subject to change, probably we will have
327    to do some tuning and decide about this number based on symmetries, and
328    dimensions in the runtime. */
329 
// Container of folded general symmetric tensors (FGSTensor).
class FGSContainer : public TensorContainer<FGSTensor>
{
  // Number of columns of the product of g tensors formed at one time in
  // multAndAdd(); a tuning constant (see the comment above).
  static constexpr int num_one_time = 10;
public:
  // Constructs an empty container for tensors with ‘nn’ coordinate types.
  FGSContainer(int nn)
    : TensorContainer<FGSTensor>(nn)
  {
  }
  // Conversion from the unfolded container; defined out of line.
  FGSContainer(const UGSContainer &c);
  // Faà Di Bruno step with folded h ‘t’, adding into folded ‘out’ (B).
  void multAndAdd(const FGSTensor &t, FGSTensor &out) const;
  // Faà Di Bruno step with unfolded h ‘t’, adding into folded ‘out’ (B).
  void multAndAdd(const UGSTensor &t, FGSTensor &out) const;
private:
  // Helper for multAndAdd(); presumably collects up to ‘num’ index sequences
  // from ‘start’ towards ‘end’ into ‘out’ and returns where it stopped —
  // TODO confirm against the out-of-line definition.
  static Tensor::index getIndices(int num, std::vector<IntSequence> &out,
                                  const Tensor::index &start,
                                  const Tensor::index &end);
};
346 
347 #endif
348