/*
 * Copyright © 2005 Ondra Kamenik
 * Copyright © 2019 Dynare Team
 *
 * This file is part of Dynare.
 *
 * Dynare is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Dynare is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Dynare. If not, see <http://www.gnu.org/licenses/>.
 */

// Refined stack of containers.

/* This file defines a refinement of the stack container. It makes a
   vertical refinement of a given stack container; it refines only matrix
   items — the items which are always zero, or which can be identity
   matrices, are not refined.

   The refinement is done by a simple construction from the stack
   container being refined. A parameter is passed giving the maximum size
   of each stack in the refined container. The resulting object is itself
   a stack container, so everything works seamlessly.

   We define here a class for the refinement of sizes, SizeRefinement;
   this is purely an auxiliary class allowing us to write the code more
   concisely. The main class of this file is FineContainer, which
   performs the refining. The two further classes, FoldedFineContainer
   and UnfoldedFineContainer, are its specializations.

   NOTE: This code was implemented in the hope that it would help to cut
   down memory allocations during the Faà Di Bruno formula
   evaluation. However, it seems that this needs to be accompanied by a
   similar mechanism for the tensor multidimensional index. Thus, the
   abstraction is not currently used, but it might be useful in future.
*/ 44 45 #ifndef FINE_CONTAINER_H 46 #define FINE_CONTAINER_H 47 48 #include "stack_container.hh" 49 50 #include <vector> 51 #include <memory> 52 53 /* This class splits the first nc elements of the given sequence s 54 to a sequence not having items greater than given max. The remaining 55 elements (those behind nc) are left untouched. It also remembers the 56 mapping, i.e. for a given index in a new sequence, it is able to 57 return a corresponding index in old sequence. */ 58 59 class SizeRefinement 60 { 61 std::vector<int> rsizes; 62 std::vector<int> ind_map; 63 int new_nc; 64 public: 65 SizeRefinement(const IntSequence &s, int nc, int max); 66 int getRefSize(int i) const67 getRefSize(int i) const 68 { 69 return rsizes[i]; 70 } 71 int numRefinements() const72 numRefinements() const 73 { 74 return rsizes.size(); 75 } 76 int getOldIndex(int i) const77 getOldIndex(int i) const 78 { 79 return ind_map[i]; 80 } 81 int getNC() const82 getNC() const 83 { 84 return new_nc; 85 } 86 }; 87 88 /* This main class of this class refines a given stack container, and 89 inherits from the stack container. It also defines the getType() 90 method, which returns a type for a given stack as the type of the 91 corresponding (old) stack of the former stack container. */ 92 93 template<class _Ttype> 94 class FineContainer : public SizeRefinement, public StackContainer<_Ttype> 95 { 96 protected: 97 using _Stype = StackContainer<_Ttype>; 98 using _Ctype = typename StackContainerInterface<_Ttype>::_Ctype; 99 using itype = typename StackContainerInterface<_Ttype>::itype; 100 std::vector<std::unique_ptr<_Ctype>> ref_conts; 101 const _Stype &stack_cont; 102 public: 103 /* Here we construct the SizeRefinement and allocate space for the 104 refined containers. Then, the containers are created and put to 105 conts array. Note that the containers do not claim any further 106 space, since all the tensors of the created containers are in-place 107 submatrices. 
108 109 Here we use a dirty trick of converting const pointer to non-const 110 pointer and passing it to a subtensor container constructor. The 111 containers are stored in ref_conts and then in conts from 112 StackContainer. However, this is safe since neither ref_conts nor 113 conts are used in non-const contexts. For example, 114 StackContainer has only a const method to return a member of 115 conts. */ 116 FineContainer(const _Stype & sc,int max)117 FineContainer(const _Stype &sc, int max) 118 : SizeRefinement(sc.getStackSizes(), sc.numConts(), max), 119 StackContainer<_Ttype>(numRefinements(), getNC()), 120 ref_conts(getNC()), 121 stack_cont(sc) 122 { 123 for (int i = 0; i < numRefinements(); i++) 124 _Stype::stack_sizes[i] = getRefSize(i); 125 _Stype::calculateOffsets(); 126 127 int last_cont = -1; 128 int last_row = 0; 129 for (int i = 0; i < getNC(); i++) 130 { 131 if (getOldIndex(i) != last_cont) 132 { 133 last_cont = getOldIndex(i); 134 last_row = 0; 135 } 136 ref_conts[i] = std::make_unique<_Ctype>(last_row, _Stype::stack_sizes[i], 137 const_cast<_Ctype &>(stack_cont.getCont(last_cont))); 138 _Stype::conts[i] = ref_conts[i].get(); 139 last_row += _Stype::stack_sizes[i]; 140 } 141 } 142 143 itype getType(int i,const Symmetry & s) const144 getType(int i, const Symmetry &s) const override 145 { 146 return stack_cont.getType(getOldIndex(i), s); 147 } 148 149 }; 150 151 /* Here is FineContainer specialization for folded tensors. */ 152 class FoldedFineContainer : public FineContainer<FGSTensor>, public FoldedStackContainer 153 { 154 public: FoldedFineContainer(const StackContainer<FGSTensor> & sc,int max)155 FoldedFineContainer(const StackContainer<FGSTensor> &sc, int max) 156 : FineContainer<FGSTensor>(sc, max) 157 { 158 } 159 }; 160 161 /* Here is FineContainer specialization for unfolded tensors. 
*/ 162 class UnfoldedFineContainer : public FineContainer<UGSTensor>, public UnfoldedStackContainer 163 { 164 public: UnfoldedFineContainer(const StackContainer<UGSTensor> & sc,int max)165 UnfoldedFineContainer(const StackContainer<UGSTensor> &sc, int max) 166 : FineContainer<UGSTensor>(sc, max) 167 { 168 } 169 }; 170 171 #endif 172