1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 1998 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_tensor_accessors_h
17 #define dealii_tensor_accessors_h
18 
19 #include <deal.II/base/config.h>
20 
21 #include <deal.II/base/table_indices.h>
22 #include <deal.II/base/template_constraints.h>
23 
24 
25 DEAL_II_NAMESPACE_OPEN
26 
27 /**
28  * This namespace is a collection of algorithms working on generic tensorial
29  * objects (of arbitrary rank).
30  *
 * The rationale for implementing such functionality in a generic fashion in
 * a separate namespace is
 *  - to ease code reuse and thereby avoid code duplication.
 *  - to have a well-defined interface that allows exchanging the low-level
 * implementation.
36  *
37  *
38  * A tensorial object has the notion of a rank and allows a rank-times
39  * recursive application of the index operator, e.g., if <code>t</code> is a
40  * tensorial object of rank 4, the following access is valid:
41  * @code
42  *   t[1][2][1][4]
43  * @endcode
44  *
 * deal.II has its own implementations of tensorial objects, such as
 * dealii::Tensor<rank, dim, Number> and dealii::SymmetricTensor<rank, dim,
 * Number>.
 *
 * The methods and algorithms implemented in this namespace, however, are
 * fully generic. More precisely, they can operate on nested C-style arrays,
 * or on class types <code>T</code> with a minimal interface that provides a
52  * local alias <code>value_type</code> and an index operator
53  * <code>operator[](unsigned int)</code> that returns a (const or non-const)
 * reference to <code>value_type</code>:
55  * @code
56  *   template <...>
57  *   class T
58  *   {
59  *     using value_type = ...;
60  *     value_type & operator[](unsigned int);
61  *     const value_type & operator[](unsigned int) const;
62  *   };
63  * @endcode
64  *
65  * This namespace provides primitives for access, reordering and contraction
66  * of such objects.
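 *
 * For example, a plain nested C-style array already qualifies as such an
 * object. As a small illustration (using the extract() function defined
 * further down in this namespace):
 * @code
 *   double t[2][2][2];                        // a rank-3 "tensorial object"
 *   const unsigned int indices[3] = {0, 1, 1};
 *   TensorAccessors::extract<3>(t, indices) = 42.; // same as t[0][1][1] = 42.;
 * @endcode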
67  *
68  * @ingroup geomprimitives
69  */
70 namespace TensorAccessors
71 {
72   // forward declarations
73   namespace internal
74   {
75     template <int index, int rank, typename T>
76     class ReorderedIndexView;
77     template <int position, int rank>
78     struct ExtractHelper;
79     template <int no_contr, int rank_1, int rank_2, int dim>
80     class Contract;
81     template <int rank_1, int rank_2, int dim>
82     class Contract3;
83   } // namespace internal
84 
85 
86   /**
87    * This class provides a local alias @p value_type denoting the resulting
   * type of an access with operator[](unsigned int). More precisely,
   * @p value_type will be
90    *  - <code>T::value_type</code> if T is a tensorial class providing an
91    * alias <code>value_type</code> and does not have a const qualifier.
92    *  - <code>const T::value_type</code> if T is a tensorial class
93    * providing an alias <code>value_type</code> and does have a const
94    * qualifier.
98    *  - <code>A</code> if T is of array type <code>A[...]</code>
99    *  - <code>const A</code> if T is of array type <code>A[...]</code> and
100    * does have a const qualifier.
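   *
   * As a brief illustration (using only the specializations defined in this
   * file; std::is_same is from <type_traits>), the following assertions
   * should hold:
   * @code
   *   static_assert(std::is_same<ValueType<double[3]>::value_type,
   *                              double>::value, "");
   *   static_assert(std::is_same<ValueType<const double[3][2]>::value_type,
   *                              const double[2]>::value, "");
   * @endcode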
101    */
102   template <typename T>
103   struct ValueType
104   {
105     using value_type = typename T::value_type;
106   };
107 
108   template <typename T>
109   struct ValueType<const T>
110   {
111     using value_type = const typename T::value_type;
112   };
113 
114   template <typename T, std::size_t N>
115   struct ValueType<T[N]>
116   {
117     using value_type = T;
118   };
119 
120   template <typename T, std::size_t N>
121   struct ValueType<const T[N]>
122   {
123     using value_type = const T;
124   };
125 
126 
127   /**
   * This class provides a local alias @p value_type that denotes the type
   * obtained after @p deref_steps recursive dereferences via
   * <code>operator[](unsigned int)</code>. Further, constness is preserved
   * via the ValueType type trait, i.e., if T is const,
   * ReturnType<deref_steps, T>::value_type will also be const.
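   *
   * As a brief illustration using a nested C-style array as the tensorial
   * type (std::is_same is from <type_traits>):
   * @code
   *   // two dereferences into double[2][3][4] yield double[4]:
   *   static_assert(std::is_same<ReturnType<2, double[2][3][4]>::value_type,
   *                              double[4]>::value, "");
   *   // three dereferences yield the underlying scalar type:
   *   static_assert(std::is_same<ReturnType<3, double[2][3][4]>::value_type,
   *                              double>::value, "");
   * @endcode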
133    */
134   template <int deref_steps, typename T>
135   struct ReturnType
136   {
137     using value_type =
138       typename ReturnType<deref_steps - 1,
139                           typename ValueType<T>::value_type>::value_type;
140   };
141 
142   template <typename T>
143   struct ReturnType<0, T>
144   {
145     using value_type = T;
146   };
147 
148 
149   /**
150    * Provide a "tensorial view" to a reference @p t of a tensor object of rank
151    * @p rank in which the index @p index is shifted to the end. As an example
152    * consider a tensor of 5th order in dim=5 space dimensions that can be
153    * accessed through 5 recursive <code>operator[]()</code> invocations:
154    * @code
155    *   Tensor<5, dim> tensor;
156    *   tensor[0][1][2][3][4] = 42.;
157    * @endcode
   * Index 1 (the second index, counting from zero) can now be shifted to the
   * end via
160    * @code
161    *   auto tensor_view = reordered_index_view<1, 5>(tensor);
162    *   tensor_view[0][2][3][4][1] == 42.; // is true
163    * @endcode
   * The dealii::Tensor type is used here solely for the sake of an example.
   * The mechanism implemented by this function is available for fairly
   * general tensorial types @p T.
167    *
168    * The purpose of this reordering facility is to be able to contract over an
169    * arbitrary index of two (or more) tensors:
   *  - reorder the indices in question to the end of the tensors,
   *  - use the contract() function below, which contracts over the _last_
   * indices of the tensors (see the example below).
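   *
   * As a sketch of this procedure (using dealii::Tensor purely for
   * illustration), a contraction over the first index of two rank-2 tensors
   * could be written as follows:
   * @code
   *   Tensor<2, dim> left, right, result;
   *   // shift index 0 of both tensors to the end and contract over it,
   *   // i.e., result[i][j] = sum_k left[k][i] * right[k][j]:
   *   TensorAccessors::contract<1, 2, 2, dim>(
   *     result,
   *     TensorAccessors::reordered_index_view<0, 2>(left),
   *     TensorAccessors::reordered_index_view<0, 2>(right));
   * @endcode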
173    *
   * @note This function returns an internal class object providing an
175    * array subscript operator <code>operator[](unsigned int)</code> and an
176    * alias <code>value_type</code> describing its return value.
177    *
178    * @tparam index The index to be shifted to the end. Indices are counted
179    * from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
180    * @tparam rank Rank of the tensorial object @p t
181    * @tparam T A tensorial object of rank @p rank. @p T must provide a local
182    * alias <code>value_type</code> and an index operator
183    * <code>operator[]()</code> that returns a (const or non-const) reference
   * to <code>value_type</code>.
185    */
186   template <int index, int rank, typename T>
187   constexpr DEAL_II_ALWAYS_INLINE internal::ReorderedIndexView<index, rank, T>
188                                   reordered_index_view(T &t)
189   {
190     static_assert(0 <= index && index < rank,
191                   "The specified index must lie within the range [0,rank)");
192 
193     return internal::ReorderedIndexView<index, rank, T>(t);
194   }
195 
196 
197   /**
198    * Return a reference (const or non-const) to a subobject of a tensorial
199    * object @p t of type @p T, as described by an array type @p ArrayType
200    * object @p indices. For example:
201    * @code
202    *   Tensor<5, dim> tensor;
203    *   TableIndices<5> indices (0, 1, 2, 3, 4);
204    *   TensorAccessors::extract(tensor, indices) = 42;
205    * @endcode
206    * This is equivalent to <code>tensor[0][1][2][3][4] = 42.</code>.
207    *
208    * @tparam T A tensorial object of rank @p rank. @p T must provide a local
209    * alias <code>value_type</code> and an index operator
210    * <code>operator[]()</code> that returns a (const or non-const) reference
   * to <code>value_type</code>. Further, its tensorial rank must be equal to
   * or greater than @p rank.
213    *
   * @tparam ArrayType An array-like object, such as std::array or
   * dealii::TableIndices, that stores at least @p rank indices that can be
   * accessed via operator[]().
217    */
218   template <int rank, typename T, typename ArrayType>
219   constexpr DEAL_II_ALWAYS_INLINE typename ReturnType<rank, T>::value_type &
220   extract(T &t, const ArrayType &indices)
221   {
222     return internal::ExtractHelper<0, rank>::template extract<T, ArrayType>(
223       t, indices);
224   }
225 
226 
227   /**
228    * This function contracts two tensorial objects @p left and @p right and
229    * stores the result in @p result. The contraction is done over the _last_
230    * @p no_contr indices of both tensorial objects:
231    *
232    * @f[
233    *   \text{result}_{i_1,..,i_{r1},j_1,..,j_{r2}}
234    *   = \sum_{k_1,..,k_{\mathrm{no\_contr}}}
235    *     \mathrm{left}_{i_1,..,i_{r1},k_1,..,k_{\mathrm{no\_contr}}}
236    *     \mathrm{right}_{j_1,..,j_{r2},k_1,..,k_{\mathrm{no\_contr}}}
237    * @f]
238    *
   * Calling this function is equivalent to writing the following low-level
240    * code:
241    * @code
242    *   for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
243    *     ...
244    *       for(unsigned int i_ = 0; i_ < dim; ++i_)
245    *         for(unsigned int j_0 = 0; j_0 < dim; ++j_0)
246    *           ...
247    *             for(unsigned int j_ = 0; j_ < dim; ++j_)
248    *               {
249    *                 result[i_0]..[i_][j_0]..[j_] = 0.;
250    *                 for(unsigned int k_0 = 0; k_0 < dim; ++k_0)
251    *                   ...
252    *                     for(unsigned int k_ = 0; k_ < dim; ++k_)
253    *                       result[i_0]..[i_][j_0]..[j_] +=
254    *                         left[i_0]..[i_][k_0]..[k_]
255    *                           * right[j_0]..[j_][k_0]..[k_];
256    *               }
257    * @endcode
   * Here, the indices <code>i_0, ..., i_</code> run over the first
   * <code>rank_1 - no_contr</code> indices of @p left, the indices
   * <code>j_0, ..., j_</code> over the first <code>rank_2 - no_contr</code>
   * indices of @p right, and the indices <code>k_0, ..., k_</code> over the
   * <code>no_contr</code> contracted indices; consequently, @p result has
   * rank <code>rank_1 + rank_2 - 2 * no_contr</code>.
260    *
   * @note The types @p T1, @p T2, and @p T3 must have rank rank_1 + rank_2 -
   * 2 * no_contr, rank_1, and rank_2, respectively. Obviously, no_contr must
   * be less than or equal to both rank_1 and rank_2.
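   *
   * As a usage sketch (with dealii::Tensor objects chosen purely for
   * illustration), a double contraction of a rank-3 with a rank-2 tensor
   * over their last two indices reads:
   * @code
   *   Tensor<3, dim> left;
   *   Tensor<2, dim> right;
   *   Tensor<1, dim> result;
   *   // result[i] = sum_{k, l} left[i][k][l] * right[k][l]:
   *   TensorAccessors::contract<2, 3, 2, dim>(result, left, right);
   * @endcode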
264    */
265   template <int no_contr,
266             int rank_1,
267             int rank_2,
268             int dim,
269             typename T1,
270             typename T2,
271             typename T3>
272   DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE void
273   contract(T1 &result, const T2 &left, const T3 &right)
274   {
275     static_assert(rank_1 >= no_contr,
276                   "The rank of the left tensor must be "
277                   "equal or greater than the number of "
278                   "contractions");
279     static_assert(rank_2 >= no_contr,
280                   "The rank of the right tensor must be "
281                   "equal or greater than the number of "
282                   "contractions");
283 
284     internal::Contract<no_contr, rank_1, rank_2, dim>::
285       template contract<T1, T2, T3>(result, left, right);
286   }
287 
288 
289   /**
290    * Full contraction of three tensorial objects:
291    *
292    * @f[
293    *   \sum_{i_1,..,i_{r1},j_1,..,j_{r2}}
294    *   \text{left}_{i_1,..,i_{r1}}
295    *   \text{middle}_{i_1,..,i_{r1},j_1,..,j_{r2}}
296    *   \text{right}_{j_1,..,j_{r2}}
297    * @f]
298    *
   * Calling this function is equivalent to writing the following low-level
300    * code:
301    * @code
302    *   T1 result = T1();
303    *   for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
304    *     ...
305    *       for(unsigned int i_ = 0; i_ < dim; ++i_)
306    *         for(unsigned int j_0 = 0; j_0 < dim; ++j_0)
307    *           ...
308    *             for(unsigned int j_ = 0; j_ < dim; ++j_)
309    *               result += left[i_0]..[i_]
310    *                           * middle[i_0]..[i_][j_0]..[j_]
311    *                           * right[j_0]..[j_];
312    * @endcode
313    *
   * @note The types @p T2, @p T3, and @p T4 must have rank rank_1, rank_1 +
   * rank_2, and rank_2, respectively. @p T1 must be a scalar type.
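   *
   * As a usage sketch (with dealii::Tensor objects chosen purely for
   * illustration; the scalar return type @p T1 cannot be deduced and has to
   * be specified explicitly):
   * @code
   *   Tensor<1, dim> left;
   *   Tensor<3, dim> middle;
   *   Tensor<2, dim> right;
   *   // computes sum_{i, j, k} left[i] * middle[i][j][k] * right[j][k]:
   *   const double result =
   *     TensorAccessors::contract3<1, 2, dim, double>(left, middle, right);
   * @endcode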
316    */
317   template <int rank_1,
318             int rank_2,
319             int dim,
320             typename T1,
321             typename T2,
322             typename T3,
323             typename T4>
324   constexpr T1
325   contract3(const T2 &left, const T3 &middle, const T4 &right)
326   {
327     return internal::Contract3<rank_1, rank_2, dim>::
328       template contract3<T1, T2, T3, T4>(left, middle, right);
329   }
330 
331 
332   namespace internal
333   {
334     // -------------------------------------------------------------------------
335     // Forward declarations and type traits
336     // -------------------------------------------------------------------------
337 
338     template <int rank, typename S>
339     class StoreIndex;
340     template <typename T>
341     class Identity;
342     template <int no_contr, int dim>
343     class Contract2;
344 
345     /**
346      * An internally used type trait to allow nested application of the
347      * function reordered_index_view(T &t).
348      *
     * The problem is that when working with the actual tensorial types, we
     * have to return subtensors by reference; but sometimes, especially for
     * StoreIndex and ReorderedIndexView, which return rvalues, we have to
     * return by value.
353      */
354     template <typename T>
355     struct ReferenceType
356     {
357       using type = T &;
358     };
359 
360     template <int rank, typename S>
361     struct ReferenceType<StoreIndex<rank, S>>
362     {
363       using type = StoreIndex<rank, S>;
364     };
365 
366     template <int index, int rank, typename T>
367     struct ReferenceType<ReorderedIndexView<index, rank, T>>
368     {
369       using type = ReorderedIndexView<index, rank, T>;
370     };
371 
372 
373     // TODO: Is there a possibility to just have the following block of
374     // explanation on an internal page in doxygen? If, yes. Doxygen
375     // wizards, your call!
376 
377     // -------------------------------------------------------------------------
378     // Implementation of helper classes for reordered_index_view
379     // -------------------------------------------------------------------------
380 
    // OK. This is utterly brutal template magic. Therefore, we will not
    // comment on the individual internal helper classes, because doing so
    // would be of little value; instead, we explain the general recursion
    // procedure.
384     //
385     // (In order of appearance)
386     //
387     // Our task is to reorder access to a tensor object where a specified
388     // index is moved to the end. Thus we want to construct an object
389     // <code>reordered</code> out of a <code>tensor</code> where the
390     // following access patterns are equivalent:
391     // @code
392     //   tensor    [i_0]...[i_index-1][i_index][i_index+1]...[i_n]
    //   reordered [i_0]...[i_index-1][i_index+1]...[i_n][i_index]
394     // @endcode
395     //
396     // The first task is to get rid of the application of
397     // [i_0]...[i_index-1]. This is a classical recursion pattern - relay
398     // the task from <index, rank> to <index-1, rank-1> by accessing the
399     // subtensor object:
400 
401     template <int index, int rank, typename T>
402     class ReorderedIndexView
403     {
404     public:
405       constexpr ReorderedIndexView(typename ReferenceType<T>::type t)
406         : t_(t)
407       {}
408 
409       using value_type = ReorderedIndexView<index - 1,
410                                             rank - 1,
411                                             typename ValueType<T>::value_type>;
412 
413       // Recurse by applying index j directly:
414       constexpr DEAL_II_ALWAYS_INLINE value_type
415                                       operator[](unsigned int j) const
416       {
417         return value_type(t_[j]);
418       }
419 
420     private:
421       typename ReferenceType<T>::type t_;
422     };
423 
424     // At some point we hit the condition index == 0 and rank > 1, i.e.,
425     // the first index should be reordered to the end.
426     //
427     // At this point we cannot be lazy any more and have to start storing
428     // indices because we get them in the wrong order. The user supplies
429     //   [i_0][i_1]...[i_{rank - 1}]
430     // but we have to call the subtensor object with
    //   [i_{rank - 1}][i_0][i_1]...[i_{rank-2}]
432     //
433     // So give up and relay the task to the StoreIndex class:
434 
435     template <int rank, typename T>
436     class ReorderedIndexView<0, rank, T>
437     {
438     public:
439       constexpr ReorderedIndexView(typename ReferenceType<T>::type t)
440         : t_(t)
441       {}
442 
443       using value_type = StoreIndex<rank - 1, internal::Identity<T>>;
444 
445       constexpr DEAL_II_ALWAYS_INLINE value_type
446                                       operator[](unsigned int j) const
447       {
448         return value_type(Identity<T>(t_), j);
449       }
450 
451     private:
452       typename ReferenceType<T>::type t_;
453     };
454 
    // For index == 0 and rank == 1 we are lucky and don't have to do
    // anything: moving the only index to the end is a no-op. In this case
    // just return the original tensor.
457 
458     template <typename T>
459     class ReorderedIndexView<0, 1, T>
460     {
461     public:
462       constexpr ReorderedIndexView(typename ReferenceType<T>::type t)
463         : t_(t)
464       {}
465 
466       using value_type =
467         typename ReferenceType<typename ValueType<T>::value_type>::type;
468 
469       constexpr DEAL_II_ALWAYS_INLINE value_type
470                                       operator[](unsigned int j) const
471       {
472         return t_[j];
473       }
474 
475     private:
476       typename ReferenceType<T>::type t_;
477     };
478 
479     // Here, Identity is a helper class to ground the recursion in
480     // StoreIndex. Its implementation is easy - we haven't stored any
481     // indices yet. So, we just provide a function apply that returns the
482     // application of an index j to the stored tensor t_:
483 
484     template <typename T>
485     class Identity
486     {
487     public:
488       constexpr Identity(typename ReferenceType<T>::type t)
489         : t_(t)
490       {}
491 
492       using return_type = typename ValueType<T>::value_type;
493 
494       constexpr DEAL_II_ALWAYS_INLINE typename ReferenceType<return_type>::type
495       apply(unsigned int j) const
496       {
497         return t_[j];
498       }
499 
500     private:
501       typename ReferenceType<T>::type t_;
502     };
503 
504     // StoreIndex is a class that stores an index recursively with every
505     // invocation of operator[](unsigned int j): We do this by recursively
506     // creating a new StoreIndex class of lower rank that stores the
507     // supplied index j and holds a copy of the current class (with all
508     // other stored indices). Again, we provide an apply member function
509     // that knows how to apply an index on the highest rank and all
510     // subsequently stored indices:
511 
512     template <int rank, typename S>
513     class StoreIndex
514     {
515     public:
516       constexpr StoreIndex(S s, int i)
517         : s_(s)
518         , i_(i)
519       {}
520 
521       using value_type = StoreIndex<rank - 1, StoreIndex<rank, S>>;
522 
523       constexpr DEAL_II_ALWAYS_INLINE value_type
524                                       operator[](unsigned int j) const
525       {
526         return value_type(*this, j);
527       }
528 
529       using return_type =
530         typename ValueType<typename S::return_type>::value_type;
531 
532       constexpr typename ReferenceType<return_type>::type
533       apply(unsigned int j) const
534       {
535         return s_.apply(j)[i_];
536       }
537 
538     private:
539       const S   s_;
540       const int i_;
541     };
542 
543     // We have to store indices until we hit rank == 1. Then, upon the next
544     // invocation of operator[](unsigned int j) we have all necessary
545     // information available to return the actual object.
546 
547     template <typename S>
548     class StoreIndex<1, S>
549     {
550     public:
551       constexpr StoreIndex(S s, int i)
552         : s_(s)
553         , i_(i)
554       {}
555 
556       using return_type =
557         typename ValueType<typename S::return_type>::value_type;
558       using value_type = return_type;
559 
560       constexpr DEAL_II_ALWAYS_INLINE return_type &
561                                       operator[](unsigned int j) const
562       {
563         return s_.apply(j)[i_];
564       }
565 
566     private:
567       const S   s_;
568       const int i_;
569     };
570 
571 
572     // -------------------------------------------------------------------------
573     // Implementation of helper classes for extract
574     // -------------------------------------------------------------------------
575 
    // Straightforward recursion implemented by specializing ExtractHelper
    // for position == rank. We use the type trait ReturnType<rank - position,
    // T> to determine the return type at every level of the recursion.
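    //
    // For example (a sketch): ExtractHelper<0, 3>::extract(t, indices)
    // successively applies [indices[0]], [indices[1]], and [indices[2]],
    // and finally returns a reference to
    // t[indices[0]][indices[1]][indices[2]].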
579     template <int position, int rank>
580     struct ExtractHelper
581     {
582       template <typename T, typename ArrayType>
583       constexpr static typename ReturnType<rank - position, T>::value_type &
584       extract(T &t, const ArrayType &indices)
585       {
586         return ExtractHelper<position + 1, rank>::template extract<
587           typename ValueType<T>::value_type,
588           ArrayType>(t[indices[position]], indices);
589       }
590     };
591 
592     // For position == rank there is nothing to extract, just return the
593     // object.
594     template <int rank>
595     struct ExtractHelper<rank, rank>
596     {
597       template <typename T, typename ArrayType>
598       constexpr static T &
599       extract(T &t, const ArrayType &)
600       {
601         return t;
602       }
603     };
604 
605 
606     // -------------------------------------------------------------------------
607     // Implementation of helper classes for contract
608     // -------------------------------------------------------------------------
609 
610     // Straightforward recursive pattern:
611     //
612     // As long as rank_1 > no_contr, assign indices from the left tensor to
613     // result. This builds up the first part of the nested outer loops:
614     //
615     // for(unsigned int i_0; i_0 < dim; ++i_0)
616     //   ...
617     //     for(i_; i_ < dim; ++i_)
618     //       [...]
619     //         result[i_0]..[i_] ... left[i_0]..[i_] ...
620 
621     template <int no_contr, int rank_1, int rank_2, int dim>
622     class Contract
623     {
624     public:
625       template <typename T1, typename T2, typename T3>
626       DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE static void
627       contract(T1 &result, const T2 &left, const T3 &right)
628       {
629         for (unsigned int i = 0; i < dim; ++i)
630           Contract<no_contr, rank_1 - 1, rank_2, dim>::contract(result[i],
631                                                                 left[i],
632                                                                 right);
633       }
634     };
635 
636     // If rank_1 == no_contr leave out the remaining no_contr indices for
637     // the contraction and assign indices from the right tensor to the
638     // result. This builds up the second part of the nested loops:
639     //
640     //  for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
641     //    ...
642     //      for(unsigned int i_ = 0; i_ < dim; ++i_)
643     //        for(unsigned int j_0 = 0; j_0 < dim; ++j_0)
644     //          ...
645     //            for(unsigned int j_ = 0; j_ < dim; ++j_)
646     //             [...]
647     //               result[i_0]..[i_][j_0]..[j_] ... left[i_0]..[i_] ...
648     //               right[j_0]..[j_]
649     //
650 
651     template <int no_contr, int rank_2, int dim>
652     class Contract<no_contr, no_contr, rank_2, dim>
653     {
654     public:
655       template <typename T1, typename T2, typename T3>
656       DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE static void
657       contract(T1 &result, const T2 &left, const T3 &right)
658       {
659         for (unsigned int i = 0; i < dim; ++i)
660           Contract<no_contr, no_contr, rank_2 - 1, dim>::contract(result[i],
661                                                                   left,
662                                                                   right[i]);
663       }
664     };
665 
    // If rank_1 == rank_2 == no_contr we have built up all of the outer
    // loops. Now, it is time to do the actual contraction:
668     //
669     // [...]
670     //   {
671     //     result[i_0]..[i_][j_0]..[j_] = 0.;
672     //     for(unsigned int k_0 = 0; k_0 < dim; ++k_0)
673     //       ...
674     //         for(unsigned int k_ = 0; k_ < dim; ++k_)
675     //           result[i_0]..[i_][j_0]..[j_] += left[i_0]..[i_][k_0]..[k_] *
676     //           right[j_0]..[j_][k_0]..[k_];
677     //   }
678     //
679     //  Relay this summation to another helper class.
680 
681     template <int no_contr, int dim>
682     class Contract<no_contr, no_contr, no_contr, dim>
683     {
684     public:
685       template <typename T1, typename T2, typename T3>
686       DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE static void
687       contract(T1 &result, const T2 &left, const T3 &right)
688       {
689         result = Contract2<no_contr, dim>::template contract2<T1>(left, right);
690       }
691     };
692 
693     // Straightforward recursion:
694     //
695     // Contract leftmost index and recurse one down.
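    //
    // For example (a sketch): Contract2<2, dim>::contract2<T1>(left, right)
    // computes
    //   sum_{k_0, k_1} left[k_0][k_1] * right[k_0][k_1],
    // where left and right are the partially indexed subtensors (with
    // no_contr remaining indices each) handed down by the Contract class
    // above.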
696 
697     template <int no_contr, int dim>
698     class Contract2
699     {
700     public:
701       template <typename T1, typename T2, typename T3>
702       DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE static T1
703       contract2(const T2 &left, const T3 &right)
704       {
705         // Some auto-differentiable numbers need explicit
706         // zero initialization.
707         if (dim == 0)
708           {
709             T1 result = dealii::internal::NumberType<T1>::value(0.0);
710             return result;
711           }
712         else
713           {
714             T1 result =
715               Contract2<no_contr - 1, dim>::template contract2<T1>(left[0],
716                                                                    right[0]);
717             for (unsigned int i = 1; i < dim; ++i)
718               result +=
719                 Contract2<no_contr - 1, dim>::template contract2<T1>(left[i],
720                                                                      right[i]);
721             return result;
722           }
723       }
724     };
725 
726     // A contraction of two objects of order 0 is just a scalar
727     // multiplication:
728 
729     template <int dim>
730     class Contract2<0, dim>
731     {
732     public:
733       template <typename T1, typename T2, typename T3>
734       constexpr DEAL_II_ALWAYS_INLINE static T1
735       contract2(const T2 &left, const T3 &right)
736       {
737         return left * right;
738       }
739     };
740 
741 
742     // -------------------------------------------------------------------------
743     // Implementation of helper classes for contract3
744     // -------------------------------------------------------------------------
745 
746     // Fully contract three tensorial objects
747     //
748     // As long as rank_1 > 0, recurse over left and middle:
749     //
750     // for(unsigned int i_0; i_0 < dim; ++i_0)
751     //   ...
752     //     for(i_; i_ < dim; ++i_)
753     //       [...]
754     //         left[i_0]..[i_] ... middle[i_0]..[i_] ... right
755 
756     template <int rank_1, int rank_2, int dim>
757     class Contract3
758     {
759     public:
760       template <typename T1, typename T2, typename T3, typename T4>
761       DEAL_II_CONSTEXPR static inline T1
762       contract3(const T2 &left, const T3 &middle, const T4 &right)
763       {
764         // Some auto-differentiable numbers need explicit
765         // zero initialization.
766         T1 result = dealii::internal::NumberType<T1>::value(0.0);
767         for (unsigned int i = 0; i < dim; ++i)
768           result += Contract3<rank_1 - 1, rank_2, dim>::template contract3<T1>(
769             left[i], middle[i], right);
770         return result;
771       }
772     };
773 
    // If rank_1 == 0, continue to recurse over middle and right:
775     //
776     // for(unsigned int i_0; i_0 < dim; ++i_0)
777     //   ...
778     //     for(i_; i_ < dim; ++i_)
779     //       for(unsigned int j_0; j_0 < dim; ++j_0)
780     //         ...
781     //           for(j_; j_ < dim; ++j_)
782     //             [...]
783     //               left[i_0]..[i_] ... middle[i_0]..[i_][j_0]..[j_] ...
784     //               right[j_0]..[j_]
785 
786     template <int rank_2, int dim>
787     class Contract3<0, rank_2, dim>
788     {
789     public:
790       template <typename T1, typename T2, typename T3, typename T4>
791       DEAL_II_CONSTEXPR static inline T1
792       contract3(const T2 &left, const T3 &middle, const T4 &right)
793       {
794         // Some auto-differentiable numbers need explicit
795         // zero initialization.
796         T1 result = dealii::internal::NumberType<T1>::value(0.0);
797         for (unsigned int i = 0; i < dim; ++i)
798           result +=
799             Contract3<0, rank_2 - 1, dim>::template contract3<T1>(left,
800                                                                   middle[i],
801                                                                   right[i]);
802         return result;
803       }
804     };
805 
806     // Contraction of three tensorial objects of rank 0 is just a scalar
807     // multiplication.
808 
809     template <int dim>
810     class Contract3<0, 0, dim>
811     {
812     public:
813       template <typename T1, typename T2, typename T3, typename T4>
814       constexpr static T1
815       contract3(const T2 &left, const T3 &middle, const T4 &right)
816       {
817         return left * middle * right;
818       }
819     };
820 
821     // -------------------------------------------------------------------------
822 
823   } /* namespace internal */
824 } /* namespace TensorAccessors */
825 
826 DEAL_II_NAMESPACE_CLOSE
827 
828 #endif /* dealii_tensor_accessors_h */
829