// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H
#define EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H

namespace Eigen {

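// Per-coefficient transformation flags: each group element carries a combination
// of these, describing what has to be done to a coefficient's value when the
// element's index permutation is applied.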
enum {
  NegationFlag           = 0x01,
  ConjugationFlag        = 0x02
};

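// Global flags derived from the symmetry group for coefficients whose index tuple
// is mapped onto itself: GlobalRealFlag / GlobalImagFlag force such a coefficient
// to be real / purely imaginary, and GlobalZeroFlag (their combination) forces it
// to be zero.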
enum {
  GlobalRealFlag         = 0x01,
  GlobalImagFlag         = 0x02,
  GlobalZeroFlag         = 0x03
};

namespace internal {

template<std::size_t NumIndices, typename... Sym>                   struct tensor_symmetry_pre_analysis;
template<std::size_t NumIndices, typename... Sym>                   struct tensor_static_symgroup;
template<bool instantiate, std::size_t NumIndices, typename... Sym> struct tensor_static_symgroup_if;
template<typename Tensor_> struct tensor_symmetry_calculate_flags;
template<typename Tensor_> struct tensor_symmetry_assign_value;
template<typename... Sym> struct tensor_symmetry_num_indices;

} // end namespace internal

template<int One_, int Two_>
struct Symmetry
{
  static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
  constexpr static int One = One_;
  constexpr static int Two = Two_;
  constexpr static int Flags = 0;
};

template<int One_, int Two_>
struct AntiSymmetry
{
  static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
  constexpr static int One = One_;
  constexpr static int Two = Two_;
  constexpr static int Flags = NegationFlag;
};

template<int One_, int Two_>
struct Hermiticity
{
  static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
  constexpr static int One = One_;
  constexpr static int Two = Two_;
  constexpr static int Flags = ConjugationFlag;
};

template<int One_, int Two_>
struct AntiHermiticity
{
  static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
  constexpr static int One = One_;
  constexpr static int Two = Two_;
  constexpr static int Flags = ConjugationFlag | NegationFlag;
};
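
// The four generator types above express, for a pair of indices, how exchanging
// them transforms a coefficient (illustrative, shown for a rank-2 tensor t):
//   Symmetry<0, 1>:        t(i, j) ==  t(j, i)
//   AntiSymmetry<0, 1>:    t(i, j) == -t(j, i)
//   Hermiticity<0, 1>:     t(i, j) ==  conj(t(j, i))
//   AntiHermiticity<0, 1>: t(i, j) == -conj(t(j, i))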

/** \class DynamicSGroup
  * \ingroup TensorSymmetry_Module
  *
  * \brief Dynamic symmetry group
  *
  * The %DynamicSGroup class represents a symmetry group that need not be known at
  * compile time. It is useful if one wants to support arbitrary run-time definable
  * symmetries for tensors, but it is also instantiated if a symmetry group defined
  * at compile time would either be too large for the compiler to generate in a
  * reasonable amount of time (template-based compile-time calculation is very
  * inefficient), or could be generated but unrolling the loop for setting
  * coefficients over it would no longer make sense.
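  *
  * For example, a group equivalent to SGroup<Symmetry<0, 1>, AntiSymmetry<2, 3>>
  * can be assembled at run time. This is a sketch; addSymmetry() and
  * addAntiSymmetry() are the generator-adding members of DynamicSGroup
  * (see DynamicSymmetry.h):
  * \code
  * Eigen::DynamicSGroup dyn_sym;
  * dyn_sym.addSymmetry(0, 1);      // exchanging indices 0 and 1 leaves coefficients unchanged
  * dyn_sym.addAntiSymmetry(2, 3);  // exchanging indices 2 and 3 negates coefficients
  * \endcode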
  */
class DynamicSGroup;

/** \internal
  *
  * \class DynamicSGroupFromTemplateArgs
  * \ingroup TensorSymmetry_Module
  *
  * \brief Dynamic symmetry group, initialized from template arguments
  *
  * This class is a subclass of DynamicSGroup. It initializes itself from the
  * generators specified as template arguments.
  */
template<typename... Gen>
class DynamicSGroupFromTemplateArgs;

/** \class StaticSGroup
  * \ingroup TensorSymmetry_Module
  *
  * \brief Static symmetry group
  *
  * This class represents a symmetry group that is known and resolved completely
  * at compile time. Ideally, no run-time penalty is incurred compared to the
  * manual unrolling of the symmetry.
  *
  * <b><i>CAUTION:</i></b>
  *
  * Do not use this class directly for large symmetry groups. The compiler
  * may run into an internal limit or crash, and at the very least it will take
  * a very long time to compile the code. Use the SGroup class instead if you
  * want a static group: it contains logic that will automatically fall back to
  * the DynamicSGroup class if the symmetry group becomes too large. (In that
  * case, unrolling may not even be beneficial.)
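  *
  * For a small group it can be used directly; a minimal sketch (default
  * construction as declared below):
  * \code
  * Eigen::StaticSGroup<Eigen::Symmetry<0, 1>, Eigen::AntiSymmetry<2, 3>> ssym;  // 4 group elements
  * \endcode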
  */
template<typename... Gen>
class StaticSGroup;

/** \class SGroup
  * \ingroup TensorSymmetry_Module
  *
  * \brief Symmetry group, initialized from template arguments
  *
  * This class represents a symmetry group whose generators are already
  * known at compile time. It may or may not be resolved at compile time,
  * depending on the estimated size of the group.
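  *
  * A typical use is to declare the group from its generators and assign tensor
  * coefficients through it, so that all symmetry-related entries are written in
  * one go. A sketch, assuming using namespace Eigen and the Tensor class from
  * the Tensor module of this package:
  * \code
  * SGroup<Symmetry<0, 1>, AntiSymmetry<2, 3>> sym;
  * Tensor<double, 4> t(4, 4, 4, 4);
  * t.setZero();
  * sym(t, 0, 1, 2, 3) = 3.0;  // also sets t(1,0,2,3) = 3 and t(0,1,3,2) = t(1,0,3,2) = -3
  * \endcode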
  *
  * \sa StaticSGroup
  * \sa DynamicSGroup
  */
template<typename... Gen>
class SGroup : public internal::tensor_symmetry_pre_analysis<internal::tensor_symmetry_num_indices<Gen...>::value, Gen...>::root_type
{
  public:
    constexpr static std::size_t NumIndices = internal::tensor_symmetry_num_indices<Gen...>::value;
    typedef typename internal::tensor_symmetry_pre_analysis<NumIndices, Gen...>::root_type Base;

    // make standard constructors + assignment operators public
    inline SGroup() : Base() { }
    inline SGroup(const SGroup<Gen...>& other) : Base(other) { }
    // forward the rvalue so that the base class can actually move
    inline SGroup(SGroup<Gen...>&& other) : Base(std::move(other)) { }
    inline SGroup<Gen...>& operator=(const SGroup<Gen...>& other) { Base::operator=(other); return *this; }
    inline SGroup<Gen...>& operator=(SGroup<Gen...>&& other) { Base::operator=(std::move(other)); return *this; }

    // all else is defined in the base class
};

namespace internal {

template<typename... Sym> struct tensor_symmetry_num_indices
{
  constexpr static std::size_t value = 1;
};

template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...>
{
private:
  constexpr static std::size_t One = static_cast<std::size_t>(One_);
  constexpr static std::size_t Two = static_cast<std::size_t>(Two_);
  constexpr static std::size_t Three = tensor_symmetry_num_indices<Sym...>::value;

  // don't use std::max, since it's not constexpr until C++14...
  constexpr static std::size_t maxOneTwoPlusOne = ((One > Two) ? One : Two) + 1;
public:
  constexpr static std::size_t value = (maxOneTwoPlusOne > Three) ? maxOneTwoPlusOne : Three;
};

template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<AntiSymmetry<One_, Two_>, Sym...>
  : public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<Hermiticity<One_, Two_>, Sym...>
  : public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<AntiHermiticity<One_, Two_>, Sym...>
  : public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
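
// Example (illustrative): for the generator list Symmetry<0, 3>, AntiSymmetry<1, 2>
// the recursion above yields max(3 + 1, 2 + 1) = 4, i.e. the smallest rank a tensor
// must have to carry these symmetries:
//   static_assert(tensor_symmetry_num_indices<Symmetry<0, 3>, AntiSymmetry<1, 2>>::value == 4, "");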

/** \internal
  *
  * \class tensor_symmetry_pre_analysis
  * \ingroup TensorSymmetry_Module
  *
  * \brief Pre-select whether to use a static or dynamic symmetry group
  *
  * When a symmetry group could in principle be determined at compile time,
  * this template implements the logic that decides whether to actually do so
  * or to defer the calculation to run time.
  *
  * The logic is as follows:
  * <dl>
  * <dt><b>No generators (trivial symmetry):</b></dt>
  * <dd>Use a trivial static group. Ideally, this has no performance impact
  *     compared to not using symmetry at all. In practice, this might not
  *     be the case.</dd>
  * <dt><b>More than 4 generators:</b></dt>
  * <dd>Calculate the group at run time; it is likely far too large for the
  *     compiler to generate in a realistic amount of time.</dd>
  * <dt><b>Up to and including 4 generators:</b></dt>
  * <dd>Actually enumerate all group elements, but then check how many there
  *     are. If there are 16 or more, it is unlikely that unrolling the
  *     loop (as is done in the static compile-time case) is sensible, so
  *     use a dynamic group instead. If there are fewer than 16 elements,
  *     actually use that static group. Note that the largest group with 4
  *     generators still compiles with reasonable resources.</dd>
  * </dl>
  *
  * Note: Example compile-time performance with g++-4.6 on an Intel Core i5-3470
  *       with 16 GiB RAM (all generators non-redundant and the subgroups don't
  *       factorize):
  *
  *          # Generators          -O0 -ggdb               -O2
  *          -------------------------------------------------------------------
  *          1                 0.5 s  /   250 MiB     0.45s /   230 MiB
  *          2                 0.5 s  /   260 MiB     0.5 s /   250 MiB
  *          3                 0.65s  /   310 MiB     0.62s /   310 MiB
  *          4                 2.2 s  /   860 MiB     1.7 s /   770 MiB
  *          5               130   s  / 13000 MiB   120   s / 11000 MiB
  *
  * It is clear that everything is still very efficient up to 4 generators; beyond
  * that, the memory and CPU requirements become unreasonable. Thus we only
  * instantiate the template group-theory logic if the number of generators
  * supplied is 4 or lower; otherwise the group calculation is forced to happen
  * at run time, where the algorithm is reasonably fast.
  */
template<std::size_t NumIndices>
struct tensor_symmetry_pre_analysis<NumIndices>
{
  typedef StaticSGroup<> root_type;
};

template<std::size_t NumIndices, typename Gen_, typename... Gens_>
struct tensor_symmetry_pre_analysis<NumIndices, Gen_, Gens_...>
{
  constexpr static std::size_t max_static_generators = 4;
  constexpr static std::size_t max_static_elements = 16;
  typedef tensor_static_symgroup_if<(sizeof...(Gens_) + 1 <= max_static_generators), NumIndices, Gen_, Gens_...> helper;
  constexpr static std::size_t possible_size = helper::size;

  typedef typename conditional<
    possible_size == 0 || possible_size >= max_static_elements,
    DynamicSGroupFromTemplateArgs<Gen_, Gens_...>,
    typename helper::type
  >::type root_type;
};
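
// Example (illustrative): for a single generator Symmetry<0, 1> the group has only
// two elements, so possible_size == 2 < max_static_elements and root_type is the
// static group. With five or more generators the static enumeration is not even
// instantiated (possible_size == 0), so root_type falls back to
// DynamicSGroupFromTemplateArgs<Gen_, Gens_...>.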

template<bool instantiate, std::size_t NumIndices, typename... Gens>
struct tensor_static_symgroup_if
{
  constexpr static std::size_t size = 0;
  typedef void type;
};

template<std::size_t NumIndices, typename... Gens>
struct tensor_static_symgroup_if<true, NumIndices, Gens...> : tensor_static_symgroup<NumIndices, Gens...> {};

template<typename Tensor_>
struct tensor_symmetry_assign_value
{
  typedef typename Tensor_::Index Index;
  typedef typename Tensor_::Scalar Scalar;
  constexpr static std::size_t NumIndices = Tensor_::NumIndices;

  static inline int run(const std::array<Index, NumIndices>& transformed_indices, int transformation_flags, int dummy, Tensor_& tensor, const Scalar& value_)
  {
    Scalar value(value_);
    if (transformation_flags & ConjugationFlag)
      value = numext::conj(value);
    if (transformation_flags & NegationFlag)
      value = -value;
    tensor.coeffRef(transformed_indices) = value;
    return dummy;
  }
};
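
// Example (illustrative): for a group element stemming from AntiHermiticity<0, 1>,
// transformation_flags is ConjugationFlag | NegationFlag, so the coefficient at the
// transformed (index-swapped) position receives -conj(value_).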

template<typename Tensor_>
struct tensor_symmetry_calculate_flags
{
  typedef typename Tensor_::Index Index;
  constexpr static std::size_t NumIndices = Tensor_::NumIndices;

  static inline int run(const std::array<Index, NumIndices>& transformed_indices, int transform_flags, int current_flags, const std::array<Index, NumIndices>& orig_indices)
  {
    if (transformed_indices == orig_indices) {
      // test for both flags first; a plain mask test would also match the
      // single-flag cases and make the branches below unreachable
      if ((transform_flags & (ConjugationFlag | NegationFlag)) == (ConjugationFlag | NegationFlag))
        return current_flags | GlobalImagFlag; // anti-hermitian diagonal
      else if (transform_flags & ConjugationFlag)
        return current_flags | GlobalRealFlag; // hermitian diagonal
      else if (transform_flags & NegationFlag)
        return current_flags | GlobalZeroFlag; // anti-symmetric diagonal
    }
    return current_flags;
  }
};
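
// Example (illustrative): for Hermiticity<0, 1> and the diagonal index tuple (2, 2),
// the swapped tuple equals the original while only ConjugationFlag is set, so run()
// adds GlobalRealFlag and the assigned value must have a zero imaginary part
// (checked in tensor_symmetry_value_setter below when
// EIGEN_TENSOR_SYMMETRY_CHECK_VALUES is defined).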

template<typename Tensor_, typename Symmetry_, int Flags = 0>
class tensor_symmetry_value_setter
{
  public:
    typedef typename Tensor_::Index Index;
    typedef typename Tensor_::Scalar Scalar;
    constexpr static std::size_t NumIndices = Tensor_::NumIndices;

    inline tensor_symmetry_value_setter(Tensor_& tensor, Symmetry_ const& symmetry, std::array<Index, NumIndices> const& indices)
      : m_tensor(tensor), m_symmetry(symmetry), m_indices(indices) { }

    inline tensor_symmetry_value_setter<Tensor_, Symmetry_, Flags>& operator=(Scalar const& value)
    {
      doAssign(value);
      return *this;
    }
  private:
    Tensor_& m_tensor;
    Symmetry_ m_symmetry;
    std::array<Index, NumIndices> m_indices;

    inline void doAssign(Scalar const& value)
    {
      #ifdef EIGEN_TENSOR_SYMMETRY_CHECK_VALUES
        int value_flags = m_symmetry.template apply<internal::tensor_symmetry_calculate_flags<Tensor_>, int>(m_indices, m_symmetry.globalFlags(), m_indices);
        if (value_flags & GlobalRealFlag)
          eigen_assert(numext::imag(value) == 0);
        if (value_flags & GlobalImagFlag)
          eigen_assert(numext::real(value) == 0);
      #endif
      m_symmetry.template apply<internal::tensor_symmetry_assign_value<Tensor_>, int>(m_indices, 0, m_tensor, value);
    }
};
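
// Note (illustrative): with EIGEN_TENSOR_SYMMETRY_CHECK_VALUES defined, assigning a
// value with a non-zero imaginary part to a coefficient whose index tuple carries
// GlobalRealFlag (e.g. the diagonal of a hermitian index pair) triggers the
// eigen_assert in doAssign() above.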

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H

/*
 * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
 */