// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H

namespace Eigen {

/** \class TensorFixedSize
  * \ingroup CXX11_Tensor_Module
  *
  * \brief The fixed-size version of the tensor class.
  *
  * The fixed-size equivalent of
  * Eigen::Tensor<float, 3> t(3, 5, 7);
  * is
  * Eigen::TensorFixedSize<float, Sizes<3,5,7>> t;
  */
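
// A minimal usage sketch (illustrative only; the extents are fixed at compile
// time through the Sizes<> template parameter):
//
//   Eigen::TensorFixedSize<float, Eigen::Sizes<3, 5, 7>> t;
//   t.setZero();              // bulk initializer inherited from TensorBase
//   t(1, 2, 3) = 4.0f;        // coefficient access through operator()
//   auto expr = t * 2.0f;     // expressions work exactly as for Eigen::Tensor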

template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> >
{
  public:
    typedef TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> Self;
    typedef TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef typename Base::CoeffReturnType CoeffReturnType;

    static const int Options = Options_;

    enum {
      IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0),
      Layout = Options_ & RowMajor ? RowMajor : ColMajor,
      CoordAccess = true,
      RawAccess = true
    };

    typedef Dimensions_ Dimensions;
    static const std::size_t NumIndices = Dimensions::count;

  protected:
    TensorStorage<Scalar, Dimensions, Options> m_storage;

  public:
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index                    rank()                   const { return NumIndices; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index                    dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions&        dimensions()             const { return m_storage.dimensions(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index                    size()                   const { return m_storage.size(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar                   *data()                        { return m_storage.data(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar             *data()                  const { return m_storage.data(); }

    // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    // work, because that uses base().coeffRef() - and we don't yet
    // implement a similar class hierarchy
    inline Self& base()             { return *this; }
    inline const Self& base() const { return *this; }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }


#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
    {
      if (Options&RowMajor) {
        const Index index = i1 + i0 * m_storage.dimensions()[1];
        return m_storage.data()[index];
      } else {
        const Index index = i0 + i1 * m_storage.dimensions()[0];
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
    {
      if (Options&RowMajor) {
        const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
    {
      if (Options&RowMajor) {
        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
    {
      if (Options&RowMajor) {
        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
        return m_storage.data()[index];
      }
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      eigen_assert(checkIndexRange(indices));
      return coeff(indices);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return coeff(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff();
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff(index);
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
    {
      if (Options&RowMajor) {
        const Index index = i1 + i0 * m_storage.dimensions()[1];
        return m_storage.data()[index];
      } else {
        const Index index = i0 + i1 * m_storage.dimensions()[0];
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
    {
      if (Options&RowMajor) {
        const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
    {
      if (Options&RowMajor) {
        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
    {
      if (Options&RowMajor) {
        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
        return m_storage.data()[index];
      }
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
    {
      eigen_assert(checkIndexRange(indices));
      return coeffRef(indices);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeffRef();
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize()
      : m_storage()
    {
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const Self& other)
      : m_storage(other.m_storage)
    {
    }

#if EIGEN_HAS_RVALUE_REFERENCES
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other)
      : m_storage(other.m_storage)
    {
    }
#endif

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize& operator=(const TensorFixedSize& other)
    {
      // FIXME: check that the dimensions of other match the dimensions of *this.
      // Unfortunately this isn't possible yet when the rhs is an expression.
      typedef TensorAssignOp<Self, const TensorFixedSize> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize& operator=(const OtherDerived& other)
    {
      // FIXME: check that the dimensions of other match the dimensions of *this.
      // Unfortunately this isn't possible yet when the rhs is an expression.
      typedef TensorAssignOp<Self, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
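
    // Illustrative sketch of how the assignment operators above are typically
    // exercised (variable names are hypothetical, not part of this header):
    //
    //   TensorFixedSize<float, Sizes<2, 3>> a, b, c;
    //   c = a + b;  // builds a TensorAssignOp and evaluates it through TensorExecutor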

  protected:
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& /*indices*/) const
    {
      using internal::array_apply_and_reduce;
      using internal::array_zip_and_reduce;
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;

      // The actual range checks are currently disabled; they would be:
      //   array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&           // indices all >= 0
      //   array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());   // indices fit in the dimensions
      return true;
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
    {
      if (Options&RowMajor) {
        return m_storage.dimensions().IndexOfRowMajor(indices);
      } else {
        return m_storage.dimensions().IndexOfColMajor(indices);
      }
    }
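
    // For reference, for a rank-3 tensor with dimensions (d0, d1, d2) these
    // linearizations match the explicit rank-3 operator() overloads above:
    //   row-major:    index = i2 + d2 * (i1 + d1 * i0)
    //   column-major: index = i0 + d0 * (i1 + d1 * i2)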
};


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H