1 /*
2 * Copyright © 2004 Ondra Kamenik
3 * Copyright © 2019 Dynare Team
4 *
5 * This file is part of Dynare.
6 *
7 * Dynare is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 3 of the License, or
10 * (at your option) any later version.
11 *
12 * Dynare is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with Dynare. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "gs_tensor.hh"
22 #include "sparse_tensor.hh"
23 #include "tl_exception.hh"
24 #include "kron_prod.hh"
25 #include "pascal_triangle.hh"
26
27 /* Constructor used for slicing fully symmetric tensor. It constructs the
28 dimensions from the partitioning of variables of fully symmetric tensor. Let
29 the partitioning be, for instance, (a,b,c,d), where (n_a,n_b,n_c,n_d)
30 are lengths of the partitions. Let one want to get a slice only of the part
31 of the fully symmetric tensor corresponding to indices of the form b²d³.
32 This corresponds to the symmetry a⁰b²c⁰d³. So, the dimension of the
33 slice would be also (n_a,n_b,n_c,n_d) for number of variables and
34 (0,2,0,3) for the symmetry. So we provide the constructor which takes
35 sizes of partitions (n_a,n_b,n_c,n_d) as IntSequence, and indices of
36 picked partitions, in our case (1,1,3,3,3), as IntSequence. */
37
TensorDimens(const IntSequence & ss,const IntSequence & coor)38 TensorDimens::TensorDimens(const IntSequence &ss, const IntSequence &coor)
39 : nvs(ss),
40 sym(ss.size()),
41 nvmax(coor.size(), 0)
42 {
43 TL_RAISE_IF(!coor.isSorted(),
44 "Coordinates not sorted in TensorDimens slicing constructor");
45 TL_RAISE_IF(coor[0] < 0 || coor[coor.size()-1] >= ss.size(),
46 "A coordinate out of stack range in TensorDimens slicing constructor");
47
48 for (int i = 0; i < coor.size(); i++)
49 {
50 sym[coor[i]]++;
51 nvmax[i] = ss[coor[i]];
52 }
53 }
54
55 /* Number of unfold offsets is a product of all members of nvmax. */
int
TensorDimens::calcUnfoldMaxOffset() const
{
  // ‘nvmax’ holds the number of variables of each dimension, so the total
  // number of unfolded columns is their product.
  return nvmax.mult();
}
61
62 /* Number of folded offsets is a product of all unfold offsets within
63 each equivalence class of the symmetry. */
64
65 int
calcFoldMaxOffset() const66 TensorDimens::calcFoldMaxOffset() const
67 {
68 int res = 1;
69 for (int i = 0; i < nvs.size(); i++)
70 {
71 if (nvs[i] == 0 && sym[i] > 0)
72 return 0;
73 if (sym[i] > 0)
74 res *= PascalTriangle::noverk(nvs[i]+sym[i]-1, sym[i]);
75 }
76 return res;
77 }
78
79 /* Here we implement offset calculation for folded general symmetry
80 tensor. The offset of a given sequence is calculated by breaking the
81 sequence to subsequences according to the symmetry. The offset is
82 orthogonal with respect to the blocks, this means that indexing within
83 the blocks is independent. If there are two blocks, for instance, then
84 the offset will be an offset within the outer block (the first)
85 multiplied with all offsets of the inner block (last) plus an offset
86 within the second block.
87
88 Generally, the resulting offset will be
89 ₛ ₛ
90 r = ∑ rᵢ· ∏ nⱼ
91 ⁱ⁼¹ ʲ⁼ⁱ⁺¹
92 where s is the number of blocks (getSym().num()), rᵢ is the offset
93 within the i-th block, and nⱼ is the number of all offsets in the j-th
94 block.
95
96 In the code, we go from the innermost to the outermost, maintaining the
97 product in ‘pow’. */
98
99 int
calcFoldOffset(const IntSequence & v) const100 TensorDimens::calcFoldOffset(const IntSequence &v) const
101 {
102 TL_RAISE_IF(v.size() != dimen(),
103 "Wrong input vector size in TensorDimens::getFoldOffset");
104
105 int res = 0;
106 int pow = 1;
107 int blstart = v.size();
108 for (int ibl = getSym().num()-1; ibl >= 0; ibl--)
109 {
110 int bldim = getSym()[ibl];
111 if (bldim > 0)
112 {
113 blstart -= bldim;
114 int blnvar = getNVX(blstart);
115 IntSequence subv(v, blstart, blstart+bldim);
116 res += FTensor::getOffset(subv, blnvar)*pow;
117 pow *= FFSTensor::calcMaxOffset(blnvar, bldim);
118 }
119 }
120 TL_RAISE_IF(blstart != 0,
121 "Error in tracing symmetry in TensorDimens::getFoldOffset");
122 return res;
123 }
124
125 /* In order to find the predecessor of index within folded generally
126 symmetric tensor, note, that a decrease action in i-th partition of
127 symmetric indices can happen only if all indices in all subsequent
partitions are zero. Then the decrease action of the whole index
129 consists of decrease action of the first nonzero partition from the
130 right, and setting these trailing zero partitions to their maximum
131 indices.
132
So we set ‘iblock’ to the index of the last partition. During the
execution, ‘block_first’ and ‘block_last’ will point to the first
element of the ‘iblock’-th partition and to the first element of the
following block, respectively.
136
137 Then we check for all trailing zero partitions, set them to their
138 maximums and return ‘iblock’ to point to the first non-zero partition
139 (or the first partition). Then for this partition, we decrease the
140 index (fully symmetric within that partition). */
141
void
TensorDimens::decrement(IntSequence &v) const
{
  TL_RAISE_IF(getNVX().size() != v.size(),
              "Wrong size of input/output sequence in TensorDimens::decrement");

  // Start at the last symmetry partition; ‘block_first’ points to its first
  // element and ‘block_last’ one past its last element.
  int iblock = getSym().num()-1;
  int block_last = v.size();
  int block_first = block_last-getSym()[iblock];

  // check for zero trailing blocks
  // A partition can only be decreased if all partitions to its right are
  // zero; reset those trailing partitions to their maximum values and move
  // ‘iblock’ left to the first non-zero partition (or the first partition).
  while (iblock > 0 && v[block_last-1] == 0)
    {
      for (int i = block_first; i < block_last; i++)
        v[i] = getNVX(i); // equivalent to nvs[iblock]
      iblock--;
      block_last = block_first;
      block_first -= getSym()[iblock];
    }

  // decrease the non-zero block
  // The partition is fully symmetric, so delegate to the folded decrement.
  // (‘vtmp’ is presumably a view sharing storage with ‘v’, so the decrement
  // updates ‘v’ in place — TODO confirm against IntSequence's
  // subsequence constructor.)
  IntSequence vtmp(v, block_first, block_last);
  FTensor::decrement(vtmp, getNVX(block_first));
}
166
167 // FGSTensor conversion from UGSTensor
168 /* Here we go through columns of folded, calculate column of unfolded,
169 and copy data. */
FGSTensor(const UGSTensor & ut)170 FGSTensor::FGSTensor(const UGSTensor &ut)
171 : FTensor(indor::along_col, ut.tdims.getNVX(), ut.nrows(),
172 ut.tdims.calcFoldMaxOffset(), ut.dimen()),
173 tdims(ut.tdims)
174 {
175 for (index ti = begin(); ti != end(); ++ti)
176 {
177 index ui(ut, ti.getCoor());
178 copyColumn(ut, *ui, *ti);
179 }
180 }
181
182 // FGSTensor slicing constructor from FSSparseTensor
183 /* Here is the code of slicing constructor from the sparse tensor. We
184 first calculate coordinates of first and last index of the slice
185 within the sparse tensor (these are ‘lb’ and ‘ub’), and then we
186 iterate through all items between them (in lexicographical ordering of
187 sparse tensor), and check whether an item is between the ‘lb’ and ‘ub’
188 in Cartesian ordering (this corresponds to belonging to the
189 slices). If it belongs, then we subtract the lower bound ‘lb’ to
190 obtain coordinates in the ‘this’ tensor and we copy the item. */
FGSTensor::FGSTensor(const FSSparseTensor &t, const IntSequence &ss,
                     const IntSequence &coor, TensorDimens td)
  : FTensor(indor::along_col, td.getNVX(), t.nrows(),
            td.calcFoldMaxOffset(), td.dimen()),
    tdims(std::move(td))
{
  // set ‘lb’ and ‘ub’ to lower and upper bounds of indices
  /* Here we first set ‘s_offsets’ to offsets of partitions whose lengths
     are given by ‘ss’. So ‘s_offsets’ is a cumulative sum of ‘ss’.

     Then we create ‘lb’ to be coordinates of the possibly first index from
     the slice, and ‘ub’ to be coordinates of possibly last index of the
     slice. */
  IntSequence s_offsets(ss.size(), 0);
  for (int i = 1; i < ss.size(); i++)
    s_offsets[i] = s_offsets[i-1] + ss[i-1];

  IntSequence lb(coor.size());
  IntSequence ub(coor.size());
  for (int i = 0; i < coor.size(); i++)
    {
      lb[i] = s_offsets[coor[i]];
      ub[i] = s_offsets[coor[i]] + ss[coor[i]] - 1;
    }

  zeros();
  // Iterate over the sparse items lying between ‘lb’ and ‘ub’ in the
  // lexicographic ordering of the map, keeping only the items that also lie
  // inside the Cartesian box [lb, ub] (i.e. that belong to the slice).
  auto lbi = t.getMap().lower_bound(lb);
  auto ubi = t.getMap().upper_bound(ub);
  for (auto run = lbi; run != ubi; ++run)
    if (lb.lessEq(run->first) && run->first.lessEq(ub))
      {
        // Shift coordinates by −lb to get coordinates within ‘this’ tensor.
        IntSequence c(run->first);
        c.add(-1, lb);
        Tensor::index ind(*this, c);
        TL_RAISE_IF(*ind < 0 || *ind >= ncols(),
                    "Internal error in slicing constructor of FGSTensor");
        get(run->second.first, *ind) = run->second.second;
      }
}
230
231 // FGSTensor slicing from FFSTensor
232 /* The code is similar to FGSTensor slicing from FSSparseTensor. */
FGSTensor::FGSTensor(const FFSTensor &t, const IntSequence &ss,
                     const IntSequence &coor, TensorDimens td)
  : FTensor(indor::along_col, td.getNVX(), t.nrows(),
            td.calcFoldMaxOffset(), td.dimen()),
    tdims(std::move(td))
{
  // Nothing to copy when the slice has no columns.
  if (ncols() == 0)
    return;

  // set ‘lb’ and ‘ub’ to lower and upper bounds of indices
  /* Same code as in the previous converting constructor: ‘s_offsets’ is the
     cumulative sum of ‘ss’, and ‘lb’/‘ub’ are the coordinates of the
     possibly first and last index of the slice. */
  IntSequence s_offsets(ss.size(), 0);
  for (int i = 1; i < ss.size(); i++)
    s_offsets[i] = s_offsets[i-1] + ss[i-1];

  IntSequence lb(coor.size());
  IntSequence ub(coor.size());
  for (int i = 0; i < coor.size(); i++)
    {
      lb[i] = s_offsets[coor[i]];
      ub[i] = s_offsets[coor[i]] + ss[coor[i]] - 1;
    }

  zeros();
  // Iterate through the dense folded tensor between the indices of ‘lb’ and
  // ‘ub’ inclusive (hence the ++ubi), copying only the columns that fall
  // inside the Cartesian box [lb, ub].
  Tensor::index lbi(t, lb);
  Tensor::index ubi(t, ub);
  ++ubi;
  for (Tensor::index run = lbi; run != ubi; ++run)
    {
      if (lb.lessEq(run.getCoor()) && run.getCoor().lessEq(ub))
        {
          // Shift by −lb to obtain coordinates within ‘this’ tensor.
          IntSequence c(run.getCoor());
          c.add(-1, lb);
          Tensor::index ind(*this, c);
          TL_RAISE_IF(*ind < 0 || *ind >= ncols(),
                      "Internal error in slicing constructor of FGSTensor");
          copyColumn(t, *run, *ind);
        }
    }
}
273
274 // FGSTensor conversion from GSSparseTensor
FGSTensor(const GSSparseTensor & t)275 FGSTensor::FGSTensor(const GSSparseTensor &t)
276 : FTensor(indor::along_col, t.getDims().getNVX(), t.nrows(),
277 t.getDims().calcFoldMaxOffset(), t.dimen()), tdims(t.getDims())
278 {
279 zeros();
280 for (const auto &it : t.getMap())
281 {
282 index ind(*this, it.first);
283 get(it.second.first, *ind) = it.second.second;
284 }
285 }
286
287 /* First we increment as unfolded, then we must monotonize within
288 partitions defined by the symmetry. This is done by
289 IntSequence::pmonotone(). */
290
void
FGSTensor::increment(IntSequence &v) const
{
  TL_RAISE_IF(v.size() != dimen(),
              "Wrong input/output vector size in FGSTensor::increment");

  // First increment as if the tensor were unfolded, then restore the
  // monotone ordering within each symmetry partition.
  UTensor::increment(v, tdims.getNVX());
  v.pmonotone(tdims.getSym());
}
300
301 /* Return unfolded version of the tensor. */
std::unique_ptr<UTensor>
FGSTensor::unfold() const
{
  // Delegate to the UGSTensor conversion constructor.
  return std::make_unique<UGSTensor>(*this);
}
307
308 /* Here we implement the contraction
309
310 [r_xⁱzᵏ]_α₁…αᵢγ₁…γₖ = [t_xⁱyʲzᵏ]_α₁…αᵢβ₁…βⱼγ₁…γₖ·[c]^β₁…βⱼ
311
312 More generally, xⁱ and zᵏ can represent also general symmetries.
313
314 The operation can be rewritten as a matrix product
315
316 [t_xⁱyʲzᵏ]·(Iₗ⊗c⊗Iᵣ)
317
318 where l is a number of columns in tensor with symmetry on the left
319 (i.e. xⁱ), and r is a number of columns in tensor with a symmetry
320 on the right (i.e. zᵏ). The code proceeds accordingly. We first
321 form two symmetries ‘sym_left’ and ‘sym_right’, then calculate the
322 number of columns dleft=l and dright=r, form the Kronecker
323 product and multiply and add.
324
325 The input parameter ‘i’ is the order of a variable being contracted
326 starting from 0. */
327
328 void
contractAndAdd(int i,FGSTensor & out,const FRSingleTensor & col) const329 FGSTensor::contractAndAdd(int i, FGSTensor &out,
330 const FRSingleTensor &col) const
331 {
332 TL_RAISE_IF(i < 0 || i >= getSym().num(),
333 "Wrong index for FGSTensor::contractAndAdd");
334
335 TL_RAISE_IF(getSym()[i] != col.dimen() || tdims.getNVS()[i] != col.nvar(),
336 "Wrong dimensions for FGSTensor::contractAndAdd");
337
338 // set ‘sym_left’ and ‘sym_right’ to symmetries around ‘i’
339 /* Here we have a symmetry of ‘this’ tensor and we have to set
340 ‘sym_left’ to the subsymmetry left from the i-th variable and
341 ‘sym_right’ to the subsymmetry right from the i-th variable. So we
342 copy first all the symmetry and then put zeros to the left for
343 ‘sym_right’ and to the right for ‘sym_left’. */
344 Symmetry sym_left(getSym());
345 Symmetry sym_right(getSym());
346 for (int j = 0; j < getSym().num(); j++)
347 {
348 if (j <= i)
349 sym_right[j] = 0;
350 if (j >= i)
351 sym_left[j] = 0;
352 }
353
354 int dleft = TensorDimens(sym_left, tdims.getNVS()).calcFoldMaxOffset();
355 int dright = TensorDimens(sym_right, tdims.getNVS()).calcFoldMaxOffset();
356 KronProdAll kp(3);
357 kp.setUnit(0, dleft);
358 kp.setMat(1, col);
359 kp.setUnit(2, dright);
360 FGSTensor tmp(out.nrows(), out.getDims());
361 kp.mult(*this, tmp);
362 out.add(1.0, tmp);
363 }
364
365 /* Here we go through folded tensor, and each index we convert to index
366 of the unfolded tensor and copy the data to the unfolded. Then we
367 unfold data within the unfolded tensor. */
368
UGSTensor(const FGSTensor & ft)369 UGSTensor::UGSTensor(const FGSTensor &ft)
370 : UTensor(indor::along_col, ft.tdims.getNVX(), ft.nrows(),
371 ft.tdims.calcUnfoldMaxOffset(), ft.dimen()),
372 tdims(ft.tdims)
373 {
374 for (index fi = ft.begin(); fi != ft.end(); ++fi)
375 {
376 index ui(*this, fi.getCoor());
377 copyColumn(ft, *fi, *ui);
378 }
379 unfoldData();
380 }
381
382 // UGSTensor slicing from FSSparseTensor
383 /* This makes a folded slice from the sparse tensor and unfolds it. */
UGSTensor(const FSSparseTensor & t,const IntSequence & ss,const IntSequence & coor,TensorDimens td)384 UGSTensor::UGSTensor(const FSSparseTensor &t, const IntSequence &ss,
385 const IntSequence &coor, TensorDimens td)
386 : UTensor(indor::along_col, td.getNVX(), t.nrows(),
387 td.calcUnfoldMaxOffset(), td.dimen()),
388 tdims(std::move(td))
389 {
390 if (ncols() == 0)
391 return;
392
393 FGSTensor ft(t, ss, coor, td);
394 for (index fi = ft.begin(); fi != ft.end(); ++fi)
395 {
396 index ui(*this, fi.getCoor());
397 copyColumn(ft, *fi, *ui);
398 }
399 unfoldData();
400 }
401
402 // UGSTensor slicing from UFSTensor
403 /* This makes a folded slice from dense and unfolds it. */
UGSTensor(const UFSTensor & t,const IntSequence & ss,const IntSequence & coor,TensorDimens td)404 UGSTensor::UGSTensor(const UFSTensor &t, const IntSequence &ss,
405 const IntSequence &coor, TensorDimens td)
406 : UTensor(indor::along_col, td.getNVX(), t.nrows(),
407 td.calcUnfoldMaxOffset(), td.dimen()),
408 tdims(std::move(td))
409 {
410 FFSTensor folded(t);
411 FGSTensor ft(folded, ss, coor, td);
412 for (index fi = ft.begin(); fi != ft.end(); ++fi)
413 {
414 index ui(*this, fi.getCoor());
415 copyColumn(ft, *fi, *ui);
416 }
417 unfoldData();
418 }
419
420 // UGSTensor increment and decrement codes
421 /* Clear, just call UTensor static methods. */
void
UGSTensor::increment(IntSequence &v) const
{
  TL_RAISE_IF(v.size() != dimen(),
              "Wrong input/output vector size in UGSTensor::increment");

  // Plain unfolded increment over the per-dimension variable counts.
  UTensor::increment(v, tdims.getNVX());
}
430
void
UGSTensor::decrement(IntSequence &v) const
{
  TL_RAISE_IF(v.size() != dimen(),
              "Wrong input/output vector size in UGSTensor::decrement");

  // Plain unfolded decrement over the per-dimension variable counts.
  UTensor::decrement(v, tdims.getNVX());
}
439
440 /* Return a new instance of folded version. */
std::unique_ptr<FTensor>
UGSTensor::fold() const
{
  // Delegate to the FGSTensor conversion constructor.
  return std::make_unique<FGSTensor>(*this);
}
446
447 /* Return an offset of a given index. */
int
UGSTensor::getOffset(const IntSequence &v) const
{
  TL_RAISE_IF(v.size() != dimen(),
              "Wrong input vector size in UGSTensor::getOffset");

  // Unfolded offset over the per-dimension variable counts.
  return UTensor::getOffset(v, tdims.getNVX());
}
456
457 /* Unfold all data. We go through all the columns and for each we
458 obtain an index of the first equivalent, and copy the data. */
459
void
UGSTensor::unfoldData()
{
  // For each column, copy the data from the column of its first equivalent
  // index (the callers fill the columns at the sorted representative
  // indices before calling this).
  for (index in = begin(); in != end(); ++in)
    copyColumn(*getFirstIndexOf(in), *in);
}
466
467 /* Here we return the first index which is equivalent in the symmetry
468 to the given index. It is a matter of sorting all the symmetry
469 partitions of the index. */
470
471 Tensor::index
getFirstIndexOf(const index & in) const472 UGSTensor::getFirstIndexOf(const index &in) const
473 {
474 IntSequence v(in.getCoor());
475 int last = 0;
476 for (int i = 0; i < tdims.getSym().num(); i++)
477 {
478 IntSequence vtmp(v, last, last+tdims.getSym()[i]);
479 vtmp.sort();
480 last += tdims.getSym()[i];
481 }
482 return index(*this, v);
483 }
484
/* This is exactly the same code, with the same semantics, as in
   FGSTensor::contractAndAdd(). */
487
488 void
contractAndAdd(int i,UGSTensor & out,const URSingleTensor & col) const489 UGSTensor::contractAndAdd(int i, UGSTensor &out,
490 const URSingleTensor &col) const
491 {
492 TL_RAISE_IF(i < 0 || i >= getSym().num(),
493 "Wrong index for UGSTensor::contractAndAdd");
494 TL_RAISE_IF(getSym()[i] != col.dimen() || tdims.getNVS()[i] != col.nvar(),
495 "Wrong dimensions for UGSTensor::contractAndAdd");
496
497 // set ‘sym_left’ and ‘sym_right’ to symmetries around i
498 /* Same code as in FGSTensor::contractAndAdd */
499 Symmetry sym_left(getSym());
500 Symmetry sym_right(getSym());
501 for (int j = 0; j < getSym().num(); j++)
502 {
503 if (j <= i)
504 sym_right[j] = 0;
505 if (j >= i)
506 sym_left[j] = 0;
507 }
508
509 int dleft = TensorDimens(sym_left, tdims.getNVS()).calcUnfoldMaxOffset();
510 int dright = TensorDimens(sym_right, tdims.getNVS()).calcUnfoldMaxOffset();
511 KronProdAll kp(3);
512 kp.setUnit(0, dleft);
513 kp.setMat(1, col);
514 kp.setUnit(2, dright);
515 UGSTensor tmp(out.nrows(), out.getDims());
516 kp.mult(*this, tmp);
517 out.add(1.0, tmp);
518 }
519