/*
 * Copyright © 2004 Ondra Kamenik
 * Copyright © 2019 Dynare Team
 *
 * This file is part of Dynare.
 *
 * Dynare is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Dynare is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "rfs_tensor.hh"
#include "kron_prod.hh"
#include "tl_exception.hh"

/* The conversion from unfolded to folded sums up all data from unfolded
   corresponding to one folded index. So we go through all the rows in the
   unfolded tensor ‘ut’, make an index of the folded tensor by sorting the
   coordinates, and add the row. */
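/* For instance, with ut.nvar() = 2 and ut.dimen() = 2, the unfolded rows with
   coordinates (0,1) and (1,0) are both accumulated into the folded row
   indexed by the sorted coordinates (0,1), while the rows (0,0) and (1,1)
   map to themselves. */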
FRTensor::FRTensor(const URTensor &ut)
  : FTensor(indor::along_row, IntSequence(ut.dimen(), ut.nvar()),
            FFSTensor::calcMaxOffset(ut.nvar(), ut.dimen()), ut.ncols(),
            ut.dimen()),
    nv(ut.nvar())
{
  zeros();
  for (index in = ut.begin(); in != ut.end(); ++in)
    {
      IntSequence vtmp(in.getCoor());
      vtmp.sort();
      index tar(*this, vtmp);
      addRow(ut, *in, *tar);
    }
}

std::unique_ptr<UTensor>
FRTensor::unfold() const
{
  return std::make_unique<URTensor>(*this);
}

/* Incrementing is easy. The same as for FFSTensor. */
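/* As an illustration (assuming, as for FFSTensor, that folded coordinates are
   kept non-decreasing): with nv = 3 and dimen() = 2 the indices are visited
   in the order (0,0), (0,1), (0,2), (1,1), (1,2), (2,2). Incrementing (0,2)
   in base nv gives (1,0), which monotone() then lifts to (1,1). */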

void
FRTensor::increment(IntSequence &v) const
{
  TL_RAISE_IF(v.size() != dimen(),
              "Wrong input/output vector size in FRTensor::increment");

  UTensor::increment(v, nv);
  v.monotone();
}

void
FRTensor::decrement(IntSequence &v) const
{
  TL_RAISE_IF(v.size() != dimen(),
              "Wrong input/output vector size in FRTensor::decrement");

  FTensor::decrement(v, nv);
}

/* Here we convert a folded full symmetry tensor to an unfolded one. We copy
   each row of the folded tensor to the corresponding row of the unfolded
   tensor and leave the remaining rows (the duplicates) zero. In this way, if
   the unfolded tensor is folded back, we should get the same data. */
URTensor::URTensor(const FRTensor &ft)
  : UTensor(indor::along_row, IntSequence(ft.dimen(), ft.nvar()),
            UFSTensor::calcMaxOffset(ft.nvar(), ft.dimen()), ft.ncols(),
            ft.dimen()),
    nv(ft.nvar())
{
  zeros();
  for (index src = ft.begin(); src != ft.end(); ++src)
    {
      index in(*this, src.getCoor());
      copyRow(ft, *src, *in);
    }
}

std::unique_ptr<FTensor>
URTensor::fold() const
{
  return std::make_unique<FRTensor>(*this);
}

void
URTensor::increment(IntSequence &v) const
{
  TL_RAISE_IF(v.size() != dimen(),
              "Wrong input/output vector size in URTensor::increment");

  UTensor::increment(v, nv);
}

void
URTensor::decrement(IntSequence &v) const
{
  TL_RAISE_IF(v.size() != dimen(),
              "Wrong input/output vector size in URTensor::decrement");

  UTensor::decrement(v, nv);
}

int
URTensor::getOffset(const IntSequence &v) const
{
  TL_RAISE_IF(v.size() != dimen(),
              "Wrong input vector size in URTensor::getOffset");

  return UTensor::getOffset(v, nv);
}

/* Here we construct v₁⊗v₂⊗…⊗vₙ, where v₁,v₂,…,vₙ are stored in a
   std::vector<ConstVector>. */
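/* The product is accumulated from the right: ‘last’ starts as vₙ, each pass
   of the loop forms vᵢ⊗last, so after the loop last = v₂⊗…⊗vₙ, and the final
   kronMult() writes v₁⊗v₂⊗…⊗vₙ into getData(). Note that all vᵢ are assumed
   to have the same length, nvar() = cols[0].length(). */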

URSingleTensor::URSingleTensor(const std::vector<ConstVector> &cols)
  : URTensor(1, cols[0].length(), cols.size())
{
  if (dimen() == 1)
    {
      getData() = cols[0];
      return;
    }

  auto last = std::make_unique<Vector>(cols[cols.size()-1]);
  for (int i = cols.size()-2; i > 0; i--)
    {
      auto newlast = std::make_unique<Vector>(power(nvar(), cols.size()-i));
      KronProd::kronMult(cols[i], ConstVector(*last), *newlast);
      last = std::move(newlast);
    }
  KronProd::kronMult(cols[0], ConstVector(*last), getData());
}

/* Here we construct v⊗…⊗v, where ‘d’ gives the number of copies of v. */
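/* A minimal usage sketch (with a hypothetical vector ‘v’): if v has length k,
   then URSingleTensor(v, 3) fills its data vector of length k³ with v⊗v⊗v,
   using the same right-to-left accumulation as above. */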

URSingleTensor::URSingleTensor(const ConstVector &v, int d)
  : URTensor(1, v.length(), d)
{
  if (d == 1)
    {
      getData() = v;
      return;
    }

  auto last = std::make_unique<Vector>(v);
  for (int i = d-2; i > 0; i--)
    {
      auto newlast = std::make_unique<Vector>(last->length()*v.length());
      KronProd::kronMult(v, ConstVector(*last), *newlast);
      last = std::move(newlast);
    }
  KronProd::kronMult(v, ConstVector(*last), getData());
}

std::unique_ptr<FTensor>
URSingleTensor::fold() const
{
  return std::make_unique<FRSingleTensor>(*this);
}

/* The conversion from unfolded URSingleTensor to folded FRSingleTensor is
   exactly the same as the conversion from URTensor to FRTensor, except that
   single elements are added up instead of whole rows. */
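/* E.g., for a URSingleTensor of dimension 2 over 2 variables, the elements
   at unfolded coordinates (0,1) and (1,0) are summed into the folded element
   at (0,1). */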
FRSingleTensor::FRSingleTensor(const URSingleTensor &ut)
  : FRTensor(1, ut.nvar(), ut.dimen())
{
  zeros();
  for (index in = ut.begin(); in != ut.end(); ++in)
    {
      IntSequence vtmp(in.getCoor());
      vtmp.sort();
      index tar(*this, vtmp);
      get(*tar, 0) += ut.get(*in, 0);
    }
}