// ---------------------------------------------------------------------
//
// Copyright (C) 1998 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------
15 
16 #ifndef dealii_tensor_h
17 #define dealii_tensor_h
18 
19 #include <deal.II/base/config.h>
20 
21 #include <deal.II/base/exceptions.h>
22 #include <deal.II/base/numbers.h>
23 #include <deal.II/base/table_indices.h>
24 #include <deal.II/base/template_constraints.h>
25 #include <deal.II/base/tensor_accessors.h>
26 #include <deal.II/base/utilities.h>
27 
28 #ifdef DEAL_II_WITH_ADOLC
29 #  include <adolc/adouble.h> // Taped double
30 #endif
31 
32 #include <cmath>
33 #include <ostream>
34 #include <utility>
35 #include <vector>
36 
37 
38 DEAL_II_NAMESPACE_OPEN
39 
40 // Forward declarations:
41 #ifndef DOXYGEN
42 template <typename ElementType, typename MemorySpace>
43 class ArrayView;
44 template <int dim, typename Number>
45 class Point;
46 template <int rank_, int dim, typename Number = double>
47 class Tensor;
48 template <typename Number>
49 class Vector;
50 template <typename number>
51 class FullMatrix;
52 namespace Differentiation
53 {
54   namespace SD
55   {
56     class Expression;
57   }
58 } // namespace Differentiation
59 #endif
60 
61 
62 /**
63  * This class is a specialized version of the <tt>Tensor<rank,dim,Number></tt>
64  * class. It handles tensors of rank zero, i.e. scalars. The second template
65  * argument @p dim is ignored.
66  *
67  * This class exists because in some cases we want to construct objects of
68  * type Tensor@<spacedim-dim,dim,Number@>, which should expand to scalars,
69  * vectors, matrices, etc, depending on the values of the template arguments
70  * @p dim and @p spacedim. We therefore need a class that acts as a scalar
71  * (i.e. @p Number) for all purposes but is part of the Tensor template
72  * family.
73  *
74  * @tparam dim An integer that denotes the dimension of the space in which
75  * this tensor operates. This of course equals the number of coordinates that
76  * identify a point and rank-1 tensor. Since the current object is a rank-0
77  * tensor (a scalar), this template argument has no meaning for this class.
78  *
79  * @tparam Number The data type in which the tensor elements are to be stored.
80  * This will, in almost all cases, simply be the default @p double, but there
81  * are cases where one may want to store elements in a different (and always
82  * scalar) type. It can be used to base tensors on @p float or @p complex
83  * numbers or any other data type that implements basic arithmetic operations.
84  * Another example would be a type that allows for Automatic Differentiation
85  * (see, for example, the Sacado type used in step-33) and thereby can
86  * generate analytic (spatial) derivatives of a function that takes a tensor
87  * as argument.
88  *
89  * @ingroup geomprimitives
90  */
91 template <int dim, typename Number>
92 class Tensor<0, dim, Number>
93 {
94 public:
95   static_assert(dim >= 0,
96                 "Tensors must have a dimension greater than or equal to one.");
97 
98   /**
99    * Provide a way to get the dimension of an object without explicit
100    * knowledge of it's data type. Implementation is this way instead of
101    * providing a function <tt>dimension()</tt> because now it is possible to
102    * get the dimension at compile time without the expansion and preevaluation
103    * of an inlined function; the compiler may therefore produce more efficient
104    * code and you may use this value to declare other data types.
105    */
106   static constexpr unsigned int dimension = dim;
107 
108   /**
109    * Publish the rank of this tensor to the outside world.
110    */
111   static constexpr unsigned int rank = 0;
112 
113   /**
114    * Number of independent components of a tensor of rank 0.
115    */
116   static constexpr unsigned int n_independent_components = 1;
117 
118   /**
119    * Declare a type that has holds real-valued numbers with the same precision
120    * as the template argument to this class. For std::complex<number>, this
121    * corresponds to type number, and it is equal to Number for all other
122    * cases. See also the respective field in Vector<Number>.
123    *
124    * This alias is used to represent the return type of norms.
125    */
126   using real_type = typename numbers::NumberTraits<Number>::real_type;
127 
128   /**
129    * Type of objects encapsulated by this container and returned by
130    * operator[](). This is a scalar number type for a rank 0 tensor.
131    */
132   using value_type = Number;
133 
134   /**
135    * Declare an array type which can be used to initialize an object of this
136    * type statically. In case of a tensor of rank 0 this is just the scalar
137    * number type Number.
138    */
139   using array_type = Number;
140 
141   /**
142    * Constructor. Set to zero.
143    *
144    * @note This function can also be used in CUDA device code.
145    */
146   constexpr DEAL_II_CUDA_HOST_DEV
147   Tensor();
148 
149   /**
150    * Constructor from tensors with different underlying scalar type. This
151    * obviously requires that the @p OtherNumber type is convertible to @p
152    * Number.
153    *
154    * @note This function can also be used in CUDA device code.
155    */
156   template <typename OtherNumber>
157   constexpr DEAL_II_CUDA_HOST_DEV
158   Tensor(const Tensor<0, dim, OtherNumber> &initializer);
159 
160   /**
161    * Constructor, where the data is copied from a C-style array.
162    *
163    * @note This function can also be used in CUDA device code.
164    */
165   template <typename OtherNumber>
166   constexpr DEAL_II_CUDA_HOST_DEV
167   Tensor(const OtherNumber &initializer);
168 
169   /**
170    * Return a pointer to the first element of the underlying storage.
171    */
172   Number *
173   begin_raw();
174 
175   /**
176    * Return a const pointer to the first element of the underlying storage.
177    */
178   const Number *
179   begin_raw() const;
180 
181   /**
182    * Return a pointer to the element past the end of the underlying storage.
183    */
184   Number *
185   end_raw();
186 
187   /**
188    * Return a const pointer to the element past the end of the underlying
189    * storage.
190    */
191   const Number *
192   end_raw() const;
193 
194   /**
195    * Return a reference to the encapsulated Number object. Since rank-0
196    * tensors are scalars, this is a natural operation.
197    *
198    * This is the non-const conversion operator that returns a writable
199    * reference.
200    *
201    * @note This function can also be used in CUDA device code.
202    */
203   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV operator Number &();
204 
205   /**
206    * Return a reference to the encapsulated Number object. Since rank-0
207    * tensors are scalars, this is a natural operation.
208    *
209    * This is the const conversion operator that returns a read-only reference.
210    *
211    * @note This function can also be used in CUDA device code.
212    */
213   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV operator const Number &() const;
214 
215   /**
216    * Assignment from tensors with different underlying scalar type. This
217    * obviously requires that the @p OtherNumber type is convertible to @p
218    * Number.
219    *
220    * @note This function can also be used in CUDA device code.
221    */
222   template <typename OtherNumber>
223   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
224                                           operator=(const Tensor<0, dim, OtherNumber> &rhs);
225 
226 #ifdef __INTEL_COMPILER
227   /**
228    * Assignment from tensors with same underlying scalar type.
229    * This is needed for ICC15 because it can't generate a suitable
230    * copy constructor for Sacado::Rad::ADvar types automatically.
231    * See https://github.com/dealii/dealii/pull/5865.
232    *
233    * @note This function can also be used in CUDA device code.
234    */
235   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
236                                           operator=(const Tensor<0, dim, Number> &rhs);
237 #endif
238 
239   /**
240    * This operator assigns a scalar to a tensor. This obviously requires
241    * that the @p OtherNumber type is convertible to @p Number.
242    *
243    * @note This function can also be used in CUDA device code.
244    */
245   template <typename OtherNumber>
246   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
247                                           operator=(const OtherNumber &d);
248 
249   /**
250    * Test for equality of two tensors.
251    */
252   template <typename OtherNumber>
253   DEAL_II_CONSTEXPR bool
254   operator==(const Tensor<0, dim, OtherNumber> &rhs) const;
255 
256   /**
257    * Test for inequality of two tensors.
258    */
259   template <typename OtherNumber>
260   constexpr bool
261   operator!=(const Tensor<0, dim, OtherNumber> &rhs) const;
262 
263   /**
264    * Add another scalar.
265    *
266    * @note This function can also be used in CUDA device code.
267    */
268   template <typename OtherNumber>
269   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
270                                           operator+=(const Tensor<0, dim, OtherNumber> &rhs);
271 
272   /**
273    * Subtract another scalar.
274    *
275    * @note This function can also be used in CUDA device code.
276    */
277   template <typename OtherNumber>
278   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
279                                           operator-=(const Tensor<0, dim, OtherNumber> &rhs);
280 
281   /**
282    * Multiply the scalar with a <tt>factor</tt>.
283    *
284    * @note This function can also be used in CUDA device code.
285    */
286   template <typename OtherNumber>
287   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
288                                           operator*=(const OtherNumber &factor);
289 
290   /**
291    * Divide the scalar by <tt>factor</tt>.
292    *
293    * @note This function can also be used in CUDA device code.
294    */
295   template <typename OtherNumber>
296   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
297                                           operator/=(const OtherNumber &factor);
298 
299   /**
300    * Tensor with inverted entries.
301    *
302    * @note This function can also be used in CUDA device code.
303    */
304   constexpr DEAL_II_CUDA_HOST_DEV Tensor
305                                   operator-() const;
306 
307   /**
308    * Reset all values to zero.
309    *
310    * Note that this is partly inconsistent with the semantics of the @p
311    * clear() member functions of the standard library containers and of
312    * several other classes within deal.II, which not only reset the values of
313    * stored elements to zero, but release all memory and return the object
314    * into a virginial state. However, since the size of objects of the present
315    * type is determined by its template parameters, resizing is not an option,
316    * and indeed the state where all elements have a zero value is the state
317    * right after construction of such an object.
318    */
319   DEAL_II_CONSTEXPR void
320   clear();
321 
322   /**
323    * Return the Frobenius-norm of a tensor, i.e. the square root of the sum of
324    * the absolute squares of all entries. For the present case of rank-1
325    * tensors, this equals the usual <tt>l<sub>2</sub></tt> norm of the vector.
326    */
327   real_type
328   norm() const;
329 
330   /**
331    * Return the square of the Frobenius-norm of a tensor, i.e. the sum of the
332    * absolute squares of all entries.
333    *
334    * @note This function can also be used in CUDA device code.
335    */
336   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV real_type
337                                           norm_square() const;
338 
339   /**
340    * Read or write the data of this object to or from a stream for the purpose
341    * of serialization
342    */
343   template <class Archive>
344   void
345   serialize(Archive &ar, const unsigned int version);
346 
347   /**
348    * Internal type declaration that is used to specialize the return type of
349    * operator[]() for Tensor<1,dim,Number>
350    */
351   using tensor_type = Number;
352 
353 private:
354   /**
355    * The value of this scalar object.
356    */
357   Number value;
358 
359   /**
360    * Internal helper function for unroll.
361    */
362   template <typename OtherNumber>
363   void
364   unroll_recursion(Vector<OtherNumber> &result,
365                    unsigned int &       start_index) const;
366 
367   // Allow an arbitrary Tensor to access the underlying values.
368   template <int, int, typename>
369   friend class Tensor;
370 };
371 
372 
373 
374 /**
375  * A general tensor class with an arbitrary rank, i.e. with an arbitrary
376  * number of indices. The Tensor class provides an indexing operator and a bit
377  * of infrastructure, but most functionality is recursively handed down to
378  * tensors of rank 1 or put into external templated functions, e.g. the
379  * <tt>contract</tt> family.
380  *
381  * The rank of a tensor specifies which types of physical quantities it can
382  * represent:
383  * <ul>
384  *   <li> A rank-0 tensor is a scalar that can store quantities such as
385  *     temperature or pressure. These scalar quantities are shown in this
386  *     documentation as simple lower-case Latin letters e.g. $a, b, c, \dots$.
387  *   </li>
388  *   <li> A rank-1 tensor is a vector with @p dim components and it can
389  *     represent vector quantities such as velocity, displacement, electric
390  *     field, etc. They can also describe the gradient of a scalar field.
391  *     The notation used for rank-1 tensors is bold-faced lower-case Latin
392  *     letters e.g. $\mathbf a, \mathbf b, \mathbf c, \dots$.
393  *     The components of a rank-1 tensor such as $\mathbf a$ are represented
394  *     as $a_i$ where $i$ is an index between 0 and <tt>dim-1</tt>.
395  *   </li>
396  *   <li> A rank-2 tensor is a linear operator that can transform a vector
397  *     into another vector. These tensors are similar to matrices with
398  *     $\text{dim} \times \text{dim}$ components. There is a related class
399  *     SymmetricTensor<2,dim> for tensors of rank 2 whose elements are
400  *     symmetric. Rank-2 tensors are usually denoted by bold-faced upper-case
401  *     Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek
402  *     letters for example $\boldsymbol{\varepsilon}, \boldsymbol{\sigma}$.
403  *     The components of a rank 2 tensor such as $\mathbf A$ are shown with
404  *     two indices $(i,j)$ as $A_{ij}$. These tensors usually describe the
405  *     gradients of vector fields (deformation gradient, velocity gradient,
406  *     etc.) or Hessians of scalar fields. Additionally, mechanical stress
407  *     tensors are rank-2 tensors that map the unit normal vectors of internal
408  *     surfaces into local traction (force per unit area) vectors.
409  *   </li>
410  *   <li> Tensors with ranks higher than 2 are similarly defined in a
411  *     consistent manner. They have $\text{dim}^{\text{rank}}$ components and
412  *     the number of indices required to identify a component equals
413  *     <tt>rank</tt>. For rank-4 tensors, a symmetric variant called
414  *     SymmetricTensor<4,dim> exists.
415  *   </li>
416  * </ul>
417  *
418  * Using this tensor class for objects of rank 2 has advantages over matrices
419  * in many cases since the dimension is known to the compiler as well as the
420  * location of the data. It is therefore possible to produce far more
421  * efficient code than for matrices with runtime-dependent dimension. It also
422  * makes the code easier to read because of the semantic difference between a
423  * tensor (an object that relates to a coordinate system and has
424  * transformation properties with regard to coordinate rotations and
425  * transforms) and matrices (which we consider as operators on arbitrary
426  * vector spaces related to linear algebra things).
427  *
428  * @tparam rank_ An integer that denotes the rank of this tensor. A
429  * specialization of this class exists for rank-0 tensors.
430  *
431  * @tparam dim An integer that denotes the dimension of the space in which
432  * this tensor operates. This of course equals the number of coordinates that
433  * identify a point and rank-1 tensor.
434  *
435  * @tparam Number The data type in which the tensor elements are to be stored.
436  * This will, in almost all cases, simply be the default @p double, but there
437  * are cases where one may want to store elements in a different (and always
438  * scalar) type. It can be used to base tensors on @p float or @p complex
439  * numbers or any other data type that implements basic arithmetic operations.
440  * Another example would be a type that allows for Automatic Differentiation
441  * (see, for example, the Sacado type used in step-33) and thereby can
442  * generate analytic (spatial) derivatives of a function that takes a tensor
443  * as argument.
444  *
445  * @ingroup geomprimitives
446  */
447 template <int rank_, int dim, typename Number>
448 class Tensor
449 {
450 public:
451   static_assert(rank_ >= 0,
452                 "Tensors must have a rank greater than or equal to one.");
453   static_assert(dim >= 0,
454                 "Tensors must have a dimension greater than or equal to one.");
455   /**
456    * Provide a way to get the dimension of an object without explicit
457    * knowledge of it's data type. Implementation is this way instead of
458    * providing a function <tt>dimension()</tt> because now it is possible to
459    * get the dimension at compile time without the expansion and preevaluation
460    * of an inlined function; the compiler may therefore produce more efficient
461    * code and you may use this value to declare other data types.
462    */
463   static constexpr unsigned int dimension = dim;
464 
465   /**
466    * Publish the rank of this tensor to the outside world.
467    */
468   static constexpr unsigned int rank = rank_;
469 
470   /**
471    * Number of independent components of a tensor of current rank. This is dim
472    * times the number of independent components of each sub-tensor.
473    */
474   static constexpr unsigned int n_independent_components =
475     Tensor<rank_ - 1, dim>::n_independent_components * dim;
476 
477   /**
478    * Type of objects encapsulated by this container and returned by
479    * operator[](). This is a tensor of lower rank for a general tensor, and a
480    * scalar number type for Tensor<1,dim,Number>.
481    */
482   using value_type = typename Tensor<rank_ - 1, dim, Number>::tensor_type;
483 
484   /**
485    * Declare an array type which can be used to initialize an object of this
486    * type statically. For `dim == 0`, its size is 1. Otherwise, it is `dim`.
487    */
488   using array_type =
489     typename Tensor<rank_ - 1, dim, Number>::array_type[(dim != 0) ? dim : 1];
490 
491   /**
492    * Constructor. Initialize all entries to zero.
493    *
494    * @note This function can also be used in CUDA device code.
495    */
496   constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
Tensor()497                                   Tensor()
498 #ifdef DEAL_II_MSVC
499     : values{}
500   {}
501 #else
502     = default;
503 #endif
504 
505   /**
506    * A constructor where the data is copied from a C-style array.
507    *
508    * @note This function can also be used in CUDA device code.
509    */
510   constexpr DEAL_II_CUDA_HOST_DEV explicit Tensor(
511     const array_type &initializer);
512 
513   /**
514    * A constructor where the data is copied from an ArrayView object.
515    * Obviously, the ArrayView object must represent a stretch of
516    * data of size `dim`<sup>`rank`</sup>. The sequentially ordered elements
517    * of the argument `initializer` are interpreted as described by
518    * unrolled_to_component_index().
519    *
520    * This constructor obviously requires that the @p ElementType type is
521    * either equal to @p Number, or is convertible to @p Number.
522    * Number.
523    *
524    * @note This function can also be used in CUDA device code.
525    */
526   template <typename ElementType, typename MemorySpace>
527   constexpr DEAL_II_CUDA_HOST_DEV explicit Tensor(
528     const ArrayView<ElementType, MemorySpace> &initializer);
529 
530   /**
531    * Constructor from tensors with different underlying scalar type. This
532    * obviously requires that the @p OtherNumber type is convertible to @p
533    * Number.
534    *
535    * @note This function can also be used in CUDA device code.
536    */
537   template <typename OtherNumber>
538   constexpr DEAL_II_CUDA_HOST_DEV
539   Tensor(const Tensor<rank_, dim, OtherNumber> &initializer);
540 
541   /**
542    * Constructor that converts from a "tensor of tensors".
543    */
544   template <typename OtherNumber>
545   constexpr Tensor(
546     const Tensor<1, dim, Tensor<rank_ - 1, dim, OtherNumber>> &initializer);
547 
548   /**
549    * Conversion operator to tensor of tensors.
550    */
551   template <typename OtherNumber>
552   constexpr
553   operator Tensor<1, dim, Tensor<rank_ - 1, dim, OtherNumber>>() const;
554 
555   /**
556    * Read-Write access operator.
557    *
558    * @note This function can also be used in CUDA device code.
559    */
560   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV value_type &
561                                           operator[](const unsigned int i);
562 
563   /**
564    * Read-only access operator.
565    *
566    * @note This function can also be used in CUDA device code.
567    */
568   constexpr DEAL_II_CUDA_HOST_DEV const value_type &
569                                         operator[](const unsigned int i) const;
570 
571   /**
572    * Read access using TableIndices <tt>indices</tt>
573    */
574   DEAL_II_CONSTEXPR const Number &
575                           operator[](const TableIndices<rank_> &indices) const;
576 
577   /**
578    * Read and write access using TableIndices <tt>indices</tt>
579    */
580   DEAL_II_CONSTEXPR Number &operator[](const TableIndices<rank_> &indices);
581 
582   /**
583    * Return a pointer to the first element of the underlying storage.
584    */
585   Number *
586   begin_raw();
587 
588   /**
589    * Return a const pointer to the first element of the underlying storage.
590    */
591   const Number *
592   begin_raw() const;
593 
594   /**
595    * Return a pointer to the element past the end of the underlying storage.
596    */
597   Number *
598   end_raw();
599 
600   /**
601    * Return a pointer to the element past the end of the underlying storage.
602    */
603   const Number *
604   end_raw() const;
605 
606   /**
607    * Assignment operator from tensors with different underlying scalar type.
608    * This obviously requires that the @p OtherNumber type is convertible to @p
609    * Number.
610    *
611    * @note This function can also be used in CUDA device code.
612    */
613   template <typename OtherNumber>
614   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
615                                           operator=(const Tensor<rank_, dim, OtherNumber> &rhs);
616 
617   /**
618    * This operator assigns a scalar to a tensor. To avoid confusion with what
619    * exactly it means to assign a scalar value to a tensor, zero is the only
620    * value allowed for <tt>d</tt>, allowing the intuitive notation
621    * <tt>t=0</tt> to reset all elements of the tensor to zero.
622    */
623   DEAL_II_CONSTEXPR Tensor &
624                     operator=(const Number &d);
625 
626   /**
627    * Test for equality of two tensors.
628    */
629   template <typename OtherNumber>
630   DEAL_II_CONSTEXPR bool
631   operator==(const Tensor<rank_, dim, OtherNumber> &) const;
632 
633   /**
634    * Test for inequality of two tensors.
635    */
636   template <typename OtherNumber>
637   constexpr bool
638   operator!=(const Tensor<rank_, dim, OtherNumber> &) const;
639 
640   /**
641    * Add another tensor.
642    *
643    * @note This function can also be used in CUDA device code.
644    */
645   template <typename OtherNumber>
646   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
647                                           operator+=(const Tensor<rank_, dim, OtherNumber> &);
648 
649   /**
650    * Subtract another tensor.
651    *
652    * @note This function can also be used in CUDA device code.
653    */
654   template <typename OtherNumber>
655   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
656                                           operator-=(const Tensor<rank_, dim, OtherNumber> &);
657 
658   /**
659    * Scale the tensor by <tt>factor</tt>, i.e. multiply all components by
660    * <tt>factor</tt>.
661    *
662    * @note This function can also be used in CUDA device code.
663    */
664   template <typename OtherNumber>
665   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
666                                           operator*=(const OtherNumber &factor);
667 
668   /**
669    * Scale the vector by <tt>1/factor</tt>.
670    *
671    * @note This function can also be used in CUDA device code.
672    */
673   template <typename OtherNumber>
674   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor &
675                                           operator/=(const OtherNumber &factor);
676 
677   /**
678    * Unary minus operator. Negate all entries of a tensor.
679    *
680    * @note This function can also be used in CUDA device code.
681    */
682   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV Tensor
683                                           operator-() const;
684 
685   /**
686    * Reset all values to zero.
687    *
688    * Note that this is partly inconsistent with the semantics of the @p
689    * clear() member functions of the standard library containers and of
690    * several other classes within deal.II, which not only reset the values of
691    * stored elements to zero, but release all memory and return the object
692    * into a virginial state. However, since the size of objects of the present
693    * type is determined by its template parameters, resizing is not an option,
694    * and indeed the state where all elements have a zero value is the state
695    * right after construction of such an object.
696    */
697   DEAL_II_CONSTEXPR void
698   clear();
699 
700   /**
701    * Return the Frobenius-norm of a tensor, i.e. the square root of the sum of
702    * the absolute squares of all entries. For the present case of rank-1
703    * tensors, this equals the usual <tt>l<sub>2</sub></tt> norm of the vector.
704    *
705    * @note This function can also be used in CUDA device code.
706    */
707   DEAL_II_CUDA_HOST_DEV
708   typename numbers::NumberTraits<Number>::real_type
709   norm() const;
710 
711   /**
712    * Return the square of the Frobenius-norm of a tensor, i.e. the sum of the
713    * absolute squares of all entries.
714    *
715    * @note This function can also be used in CUDA device code.
716    */
717   DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV
718     typename numbers::NumberTraits<Number>::real_type
719     norm_square() const;
720 
721   /**
722    * Fill a vector with all tensor elements.
723    *
724    * This function unrolls all tensor entries into a single, linearly numbered
725    * vector. As usual in C++, the rightmost index of the tensor marches
726    * fastest.
727    */
728   template <typename OtherNumber>
729   void
730   unroll(Vector<OtherNumber> &result) const;
731 
732   /**
733    * Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$
734    * for the element of the tensor indexed by the argument to the function.
735    */
736   static DEAL_II_CONSTEXPR unsigned int
737   component_to_unrolled_index(const TableIndices<rank_> &indices);
738 
739   /**
740    * Opposite of  component_to_unrolled_index: For an index in the range
741    * $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would
742    * correspond to.
743    */
744   static DEAL_II_CONSTEXPR TableIndices<rank_>
745                            unrolled_to_component_indices(const unsigned int i);
746 
747   /**
748    * Determine an estimate for the memory consumption (in bytes) of this
749    * object.
750    */
751   static constexpr std::size_t
752   memory_consumption();
753 
754   /**
755    * Read or write the data of this object to or from a stream for the purpose
756    * of serialization
757    */
758   template <class Archive>
759   void
760   serialize(Archive &ar, const unsigned int version);
761 
762   /**
763    * Internal type declaration that is used to specialize the return type of
764    * operator[]() for Tensor<1,dim,Number>
765    */
766   using tensor_type = Tensor<rank_, dim, Number>;
767 
768 private:
769   /**
770    * Array of tensors holding the subelements.
771    */
772   Tensor<rank_ - 1, dim, Number> values[(dim != 0) ? dim : 1];
773   // ... avoid a compiler warning in case of dim == 0 and ensure that the
774   // array always has positive size.
775 
776   /**
777    * Internal helper function for unroll.
778    */
779   template <typename OtherNumber>
780   void
781   unroll_recursion(Vector<OtherNumber> &result,
782                    unsigned int &       start_index) const;
783 
784   /**
785    * This constructor is for internal use. It provides a way
786    * to create constexpr constructors for Tensor<rank, dim, Number>
787    *
788    * @note This function can also be used in CUDA device code.
789    */
790   template <typename ArrayLike, std::size_t... Indices>
791   constexpr DEAL_II_CUDA_HOST_DEV
792   Tensor(const ArrayLike &initializer, std::index_sequence<Indices...>);
793 
794   // Allow an arbitrary Tensor to access the underlying values.
795   template <int, int, typename>
796   friend class Tensor;
797 
798   // Point is allowed access to the coordinates. This is supposed to improve
799   // speed.
800   friend class Point<dim, Number>;
801 };
802 
803 
804 #ifndef DOXYGEN
namespace internal
{
  // Workaround: The following 4 overloads are necessary to be able to
  // compile the library with Apple Clang 8 and older. We should remove
  // these overloads again when we bump the minimal required version to
  // something later than clang-3.6 / Apple Clang 6.3.
  //
  // All four combinations (real/complex tensor entries, either operand
  // order) map to a Tensor over std::complex of the scalar product type.
  template <int rank, int dim, typename T, typename U>
  struct ProductTypeImpl<Tensor<rank, dim, T>, std::complex<U>>
  {
    using type =
      Tensor<rank, dim, std::complex<typename ProductType<T, U>::type>>;
  };

  template <int rank, int dim, typename T, typename U>
  struct ProductTypeImpl<Tensor<rank, dim, std::complex<T>>, std::complex<U>>
  {
    using type =
      Tensor<rank, dim, std::complex<typename ProductType<T, U>::type>>;
  };

  template <typename T, int rank, int dim, typename U>
  struct ProductTypeImpl<std::complex<T>, Tensor<rank, dim, U>>
  {
    using type =
      Tensor<rank, dim, std::complex<typename ProductType<T, U>::type>>;
  };

  template <int rank, int dim, typename T, typename U>
  struct ProductTypeImpl<std::complex<T>, Tensor<rank, dim, std::complex<U>>>
  {
    using type =
      Tensor<rank, dim, std::complex<typename ProductType<T, U>::type>>;
  };
  // end workaround

  /**
   * The structs below are needed to initialize nested Tensor objects.
   * Also see numbers.h for another specialization.
   */
  template <int rank, int dim, typename T>
  struct NumberType<Tensor<rank, dim, T>>
  {
    // A Tensor already has the requested type: pass it through unchanged.
    static constexpr DEAL_II_ALWAYS_INLINE const Tensor<rank, dim, T> &
                                                 value(const Tensor<rank, dim, T> &t)
    {
      return t;
    }

    // A scalar is turned into a Tensor by assigning it to a
    // default-constructed object via Tensor's scalar assignment operator.
    static DEAL_II_CONSTEXPR DEAL_II_ALWAYS_INLINE Tensor<rank, dim, T>
                                                   value(const T &t)
    {
      Tensor<rank, dim, T> tmp;
      tmp = t;
      return tmp;
    }
  };
} // namespace internal
862 
863 
864 /*---------------------- Inline functions: Tensor<0,dim> ---------------------*/
865 
866 
// Default constructor: delegate to the scalar constructor with 0.0 so the
// stored value is always explicitly initialized.
template <int dim, typename Number>
constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
                                Tensor<0, dim, Number>::Tensor()
  // Some auto-differentiable numbers need explicit
  // zero initialization such as adtl::adouble.
  : Tensor{0.0}
{}
874 
875 
876 
// Construct from a scalar of possibly different number type; the
// conversion is routed through internal::NumberType so that special
// number types (e.g. AD types) are initialized correctly.
template <int dim, typename Number>
template <typename OtherNumber>
constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
                                Tensor<0, dim, Number>::Tensor(const OtherNumber &initializer)
  : value(internal::NumberType<Number>::value(initializer))
{}
883 
884 
885 
// Converting copy constructor from a rank-0 tensor over a different
// number type: delegate to the scalar constructor above.
template <int dim, typename Number>
template <typename OtherNumber>
constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
                                Tensor<0, dim, Number>::Tensor(const Tensor<0, dim, OtherNumber> &p)
  : Tensor{p.value}
{}
892 
893 
894 
// Pointer to the (single) stored value. std::addressof is used instead of
// operator& in case Number overloads the address-of operator.
template <int dim, typename Number>
inline Number *
Tensor<0, dim, Number>::begin_raw()
{
  return std::addressof(value);
}
901 
902 
903 
// Const counterpart of begin_raw(): pointer to the stored value.
template <int dim, typename Number>
inline const Number *
Tensor<0, dim, Number>::begin_raw() const
{
  return std::addressof(value);
}
910 
911 
912 
// Past-the-end pointer; n_independent_components is 1 for rank-0 tensors,
// so this is begin_raw() + 1.
template <int dim, typename Number>
inline Number *
Tensor<0, dim, Number>::end_raw()
{
  return begin_raw() + n_independent_components;
}
919 
920 
921 
922 template <int dim, typename Number>
923 const Number *
924 Tensor<0, dim, Number>::end_raw() const
925 {
926   return begin_raw() + n_independent_components;
927 }
928 
929 
930 
// Implicit conversion to a mutable reference to the stored scalar.
// Accessing a Tensor<0,0,Number> is meaningless and triggers the Assert.
template <int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number>::operator Number &()
{
  // We cannot use Assert inside a CUDA kernel
#  ifndef __CUDA_ARCH__
  Assert(dim != 0,
         ExcMessage("Cannot access an object of type Tensor<0,0,Number>"));
#  endif
  return value;
}
942 
943 
// Implicit conversion to a const reference to the stored scalar.
template <int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number>::operator const Number &() const
{
  // We cannot use Assert inside a CUDA kernel
#  ifndef __CUDA_ARCH__
  Assert(dim != 0,
         ExcMessage("Cannot access an object of type Tensor<0,0,Number>"));
#  endif
  return value;
}
955 
956 
// Assignment from a rank-0 tensor over another number type. `p` is passed
// through internal::NumberType, which relies on p's implicit conversion
// to its stored scalar.
template <int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number> &
  Tensor<0, dim, Number>::operator=(const Tensor<0, dim, OtherNumber> &p)
{
  value = internal::NumberType<Number>::value(p);
  return *this;
}
966 
967 
// Explicit same-type copy assignment, needed only for the Intel compiler,
// which otherwise does not pick the templated operator= above.
#  ifdef __INTEL_COMPILER
template <int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number> &
  Tensor<0, dim, Number>::operator=(const Tensor<0, dim, Number> &p)
{
  value = p.value;
  return *this;
}
#  endif
978 
979 
// Assignment from a bare scalar of possibly different number type.
template <int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number> &
  Tensor<0, dim, Number>::operator=(const OtherNumber &d)
{
  value = internal::NumberType<Number>::value(d);
  return *this;
}
989 
990 
// Equality comparison, delegating to numbers::values_are_equal. Not
// usable with ADOL-C taped numbers when advanced branching is enabled,
// since comparisons of adoubles then return branch objects, not bools.
template <int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline bool
Tensor<0, dim, Number>::operator==(const Tensor<0, dim, OtherNumber> &p) const
{
#  if defined(DEAL_II_ADOLC_WITH_ADVANCED_BRANCHING)
  Assert(!(std::is_same<Number, adouble>::value ||
           std::is_same<OtherNumber, adouble>::value),
         ExcMessage(
           "The Tensor equality operator for ADOL-C taped numbers has not yet "
           "been extended to support advanced branching."));
#  endif

  return numbers::values_are_equal(value, p.value);
}
1006 
1007 
// Inequality, defined as the negation of operator==.
template <int dim, typename Number>
template <typename OtherNumber>
constexpr bool
Tensor<0, dim, Number>::operator!=(const Tensor<0, dim, OtherNumber> &p) const
{
  return !((*this) == p);
}
1015 
1016 
// In-place addition of another rank-0 tensor.
template <int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number> &
  Tensor<0, dim, Number>::operator+=(const Tensor<0, dim, OtherNumber> &p)
{
  value += p.value;
  return *this;
}
1026 
1027 
// In-place subtraction of another rank-0 tensor.
template <int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number> &
  Tensor<0, dim, Number>::operator-=(const Tensor<0, dim, OtherNumber> &p)
{
  value -= p.value;
  return *this;
}
1037 
1038 
1039 
namespace internal
{
  // Helpers for Tensor<0>::operator*=: scalar multiply-assignment works
  // for all number types on the host, but std::complex arithmetic is not
  // available in CUDA device code, so a device-only overload traps that
  // case at run time instead of failing to compile.
  namespace ComplexWorkaround
  {
    // Generic case: plain in-place multiplication.
    template <typename Number, typename OtherNumber>
    DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV void
                                                   multiply_assign_scalar(Number &val, const OtherNumber &s)
    {
      val *= s;
    }

#  ifdef __CUDA_ARCH__
    // Device-code overload for std::complex: not implemented; abort with a
    // printf + assert since exceptions/Assert are unavailable in kernels.
    template <typename Number, typename OtherNumber>
    DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV void
                                                   multiply_assign_scalar(std::complex<Number> &, const OtherNumber &)
    {
      printf("This function is not implemented for std::complex<Number>!\n");
      assert(false);
    }
#  endif
  } // namespace ComplexWorkaround
} // namespace internal
1062 
1063 
// In-place multiplication by a scalar; dispatched through the
// ComplexWorkaround helper (see above) for CUDA compatibility.
template <int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number> &
  Tensor<0, dim, Number>::operator*=(const OtherNumber &s)
{
  internal::ComplexWorkaround::multiply_assign_scalar(value, s);
  return *this;
}
1073 
1074 
1075 
// In-place division by a scalar.
template <int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number> &
Tensor<0, dim, Number>::operator/=(const OtherNumber &s)
{
  value /= s;
  return *this;
}
1084 
1085 
// Unary minus: returns a new rank-0 tensor with the negated value
// (implicitly converted via the scalar constructor).
template <int dim, typename Number>
constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV Tensor<0, dim, Number>
Tensor<0, dim, Number>::operator-() const
{
  return -value;
}
1092 
1093 
// Frobenius norm of a rank-0 tensor, i.e. the absolute value of the
// stored scalar.
template <int dim, typename Number>
inline DEAL_II_ALWAYS_INLINE typename Tensor<0, dim, Number>::real_type
Tensor<0, dim, Number>::norm() const
{
  Assert(dim != 0,
         ExcMessage("Cannot access an object of type Tensor<0,0,Number>"));
  return numbers::NumberTraits<Number>::abs(value);
}
1102 
1103 
// Square of the Frobenius norm, i.e. |value|^2 (works for complex Number
// via NumberTraits::abs_square).
template <int dim, typename Number>
DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE
  typename Tensor<0, dim, Number>::real_type
  Tensor<0, dim, Number>::norm_square() const
{
  // We cannot use Assert inside a CUDA kernel
#  ifndef __CUDA_ARCH__
  Assert(dim != 0,
         ExcMessage("Cannot access an object of type Tensor<0,0,Number>"));
#  endif
  return numbers::NumberTraits<Number>::abs_square(value);
}
1116 
1117 
// Recursion terminal for Tensor<rank>::unroll(): copy the single scalar
// into result[index] and advance the running index.
template <int dim, typename Number>
template <typename OtherNumber>
inline void
Tensor<0, dim, Number>::unroll_recursion(Vector<OtherNumber> &result,
                                         unsigned int &       index) const
{
  Assert(dim != 0,
         ExcMessage("Cannot unroll an object of type Tensor<0,0,Number>"));
  result[index] = value;
  ++index;
}
1129 
1130 
// Reset the stored value to zero.
template <int dim, typename Number>
DEAL_II_CONSTEXPR inline void
Tensor<0, dim, Number>::clear()
{
  // Some auto-differentiable numbers need explicit
  // zero initialization.
  value = internal::NumberType<Number>::value(0.0);
}
1139 
1140 
// Boost.Serialization support: a rank-0 tensor (de)serializes its single
// scalar value. The version argument is unused.
template <int dim, typename Number>
template <class Archive>
inline void
Tensor<0, dim, Number>::serialize(Archive &ar, const unsigned int)
{
  ar &value;
}
1148 
1149 
// Out-of-class definition of the static constexpr member; required
// pre-C++17 whenever the member is odr-used.
template <int dim, typename Number>
constexpr unsigned int Tensor<0, dim, Number>::n_independent_components;
1152 
1153 
1154 /*-------------------- Inline functions: Tensor<rank,dim> --------------------*/
1155 
// Internal delegation target: expand the pack `indices` to initialize the
// `values` array element-wise from any subscriptable initializer. This is
// what makes the other constructors constexpr-capable.
template <int rank_, int dim, typename Number>
template <typename ArrayLike, std::size_t... indices>
constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
                                Tensor<rank_, dim, Number>::Tensor(const ArrayLike &initializer,
                                   std::index_sequence<indices...>)
  : values{Tensor<rank_ - 1, dim, Number>(initializer[indices])...}
{
  static_assert(sizeof...(indices) == dim,
                "dim should match the number of indices");
}
1166 
1167 
// Construct from the nested C-array type, delegating to the
// index-sequence constructor above.
template <int rank_, int dim, typename Number>
constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
                                Tensor<rank_, dim, Number>::Tensor(const array_type &initializer)
  : Tensor(initializer, std::make_index_sequence<dim>{})
{}
1173 
1174 
1175 
// Construct from a flat ArrayView of exactly n_independent_components
// entries; entry i goes to the component with unrolled index i.
template <int rank_, int dim, typename Number>
template <typename ElementType, typename MemorySpace>
constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
                                Tensor<rank_, dim, Number>::Tensor(
  const ArrayView<ElementType, MemorySpace> &initializer)
{
  AssertDimension(initializer.size(), n_independent_components);

  for (unsigned int i = 0; i < n_independent_components; ++i)
    (*this)[unrolled_to_component_indices(i)] = initializer[i];
}
1187 
1188 
1189 
// Converting copy constructor from a tensor over another number type.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
                                Tensor<rank_, dim, Number>::Tensor(
  const Tensor<rank_, dim, OtherNumber> &initializer)
  : Tensor(initializer, std::make_index_sequence<dim>{})
{}
1197 
1198 
// Construct from the equivalent "tensor of tensors" representation
// Tensor<1,dim,Tensor<rank_-1,dim,...>>.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
constexpr DEAL_II_ALWAYS_INLINE
Tensor<rank_, dim, Number>::Tensor(
  const Tensor<1, dim, Tensor<rank_ - 1, dim, OtherNumber>> &initializer)
  : Tensor(initializer, std::make_index_sequence<dim>{})
{}
1206 
1207 
// Conversion to the nested "tensor of tensors" representation. Note that
// the body constructs a tensor over Number; the conversion to OtherNumber
// in the declared return type happens via the converting constructor.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
constexpr DEAL_II_ALWAYS_INLINE Tensor<rank_, dim, Number>::
                                operator Tensor<1, dim, Tensor<rank_ - 1, dim, OtherNumber>>() const
{
  return Tensor<1, dim, Tensor<rank_ - 1, dim, Number>>(values);
}
1215 
1216 
1217 
namespace internal
{
  // Helpers implementing Tensor::operator[]. The dim==0 case is handled
  // by a separate overload so that subscripting a Tensor<rank,0,Number>
  // fails with an Assert instead of reading out of bounds.
  namespace TensorSubscriptor
  {
    // Regular case (dim > 0): bounds-checked array access.
    template <typename ArrayElementType, int dim>
    DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
      DEAL_II_CUDA_HOST_DEV ArrayElementType &
                            subscript(ArrayElementType * values,
                                      const unsigned int i,
                                      std::integral_constant<int, dim>)
    {
      // We cannot use Assert in a CUDA kernel
#  ifndef __CUDA_ARCH__
      AssertIndexRange(i, dim);
#  endif
      return values[i];
    }

    // The variables within this struct will be referenced in the next function.
    // It is a workaround that allows returning a reference to a static variable
    // while allowing constexpr evaluation of the function.
    // It has to be defined outside the function because constexpr functions
    // cannot define static variables
    template <typename ArrayElementType>
    struct Uninitialized
    {
      static ArrayElementType value;
    };

    template <typename Type>
    Type Uninitialized<Type>::value;

    // dim == 0 case: always an error; return a dummy reference so the
    // function still has a well-formed return statement.
    template <typename ArrayElementType>
    DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
      DEAL_II_CUDA_HOST_DEV ArrayElementType &
                            subscript(ArrayElementType *,
                                      const unsigned int,
                                      std::integral_constant<int, 0>)
    {
      // We cannot use Assert in a CUDA kernel
#  ifndef __CUDA_ARCH__
      Assert(
        false,
        ExcMessage(
          "Cannot access elements of an object of type Tensor<rank,0,Number>."));
#  endif
      return Uninitialized<ArrayElementType>::value;
    }
  } // namespace TensorSubscriptor
} // namespace internal
1268 
1269 
// Mutable access to the i-th sub-tensor (of rank rank_-1); dispatches on
// dim via tag type so that dim==0 is diagnosed (see TensorSubscriptor).
template <int rank_, int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE     DEAL_II_CUDA_HOST_DEV
  typename Tensor<rank_, dim, Number>::value_type &Tensor<rank_, dim, Number>::
                                                   operator[](const unsigned int i)
{
  return dealii::internal::TensorSubscriptor::subscript(
    values, i, std::integral_constant<int, dim>());
}
1278 
1279 
// Const access to the i-th sub-tensor. The bounds check is compiled out
// in CUDA-aware builds, where Assert is unavailable in device code.
template <int rank_, int dim, typename Number>
constexpr DEAL_II_ALWAYS_INLINE
    DEAL_II_CUDA_HOST_DEV const typename Tensor<rank_, dim, Number>::value_type &
    Tensor<rank_, dim, Number>::operator[](const unsigned int i) const
{
#  ifndef DEAL_II_COMPILER_CUDA_AWARE
  AssertIndexRange(i, dim);
#  endif

  return values[i];
}
1291 
1292 
// Const access to a single scalar entry addressed by a full multi-index.
template <int rank_, int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE const Number &
                                                     Tensor<rank_, dim, Number>::
                                                     operator[](const TableIndices<rank_> &indices) const
{
#  ifndef DEAL_II_COMPILER_CUDA_AWARE
  Assert(dim != 0,
         ExcMessage("Cannot access an object of type Tensor<rank_,0,Number>"));
#  endif

  return TensorAccessors::extract<rank_>(*this, indices);
}
1305 
1306 
1307 
// Mutable access to a single scalar entry addressed by a full multi-index.
template <int rank_, int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number &
  Tensor<rank_, dim, Number>::operator[](const TableIndices<rank_> &indices)
{
#  ifndef DEAL_II_COMPILER_CUDA_AWARE
  Assert(dim != 0,
         ExcMessage("Cannot access an object of type Tensor<rank_,0,Number>"));
#  endif

  return TensorAccessors::extract<rank_>(*this, indices);
}
1319 
1320 
1321 
// Pointer to the first scalar entry, i.e. the component with unrolled
// index 0; the nested `values` arrays are laid out contiguously.
template <int rank_, int dim, typename Number>
inline Number *
Tensor<rank_, dim, Number>::begin_raw()
{
  return std::addressof(
    this->operator[](this->unrolled_to_component_indices(0)));
}
1329 
1330 
1331 
// Const counterpart of begin_raw().
template <int rank_, int dim, typename Number>
inline const Number *
Tensor<rank_, dim, Number>::begin_raw() const
{
  return std::addressof(
    this->operator[](this->unrolled_to_component_indices(0)));
}
1339 
1340 
1341 
// Past-the-end pointer over all dim^rank_ scalar entries.
template <int rank_, int dim, typename Number>
inline Number *
Tensor<rank_, dim, Number>::end_raw()
{
  return begin_raw() + n_independent_components;
}
1348 
1349 
1350 
// Const counterpart of end_raw().
template <int rank_, int dim, typename Number>
inline const Number *
Tensor<rank_, dim, Number>::end_raw() const
{
  return begin_raw() + n_independent_components;
}
1357 
1358 
1359 
// Converting assignment from a tensor over another number type: assign
// the dim sub-tensors one by one (recursing down to rank 0).
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Tensor<rank_, dim, Number> &
Tensor<rank_, dim, Number>::operator=(const Tensor<rank_, dim, OtherNumber> &t)
{
  // The following loop could be written more concisely using std::copy, but
  // that function is only constexpr from C++20 on.
  for (unsigned int i = 0; i < dim; ++i)
    values[i] = t.values[i];
  return *this;
}
1371 
1372 
// Scalar assignment: only assignment of zero is allowed (it sets all
// entries to zero); any other value is a programming error.
template <int rank_, int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Tensor<rank_, dim, Number> &
Tensor<rank_, dim, Number>::operator=(const Number &d)
{
  Assert(numbers::value_is_zero(d),
         ExcMessage("Only assignment with zero is allowed"));
  (void)d;

  for (unsigned int i = 0; i < dim; ++i)
    values[i] = internal::NumberType<Number>::value(0.0);
  return *this;
}
1385 
1386 
// Element-wise equality: true iff all dim sub-tensors compare equal.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline bool
Tensor<rank_, dim, Number>::
operator==(const Tensor<rank_, dim, OtherNumber> &p) const
{
  for (unsigned int i = 0; i < dim; ++i)
    if (values[i] != p.values[i])
      return false;
  return true;
}
1398 
1399 
// At some places in the library, we have Point<0> for formal reasons
// (e.g., we sometimes have Quadrature<dim-1> for faces, so we have
// Quadrature<0> for dim=1, and then we have Point<0>). To avoid warnings
// in the above function that the loop end check always fails, we
// implement this function here. Two empty tensors are trivially equal.
template <>
template <>
DEAL_II_CONSTEXPR inline bool
Tensor<1, 0, double>::operator==(const Tensor<1, 0, double> &) const
{
  return true;
}
1412 
1413 
// Inequality, defined as the negation of operator==.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
constexpr bool
Tensor<rank_, dim, Number>::
operator!=(const Tensor<rank_, dim, OtherNumber> &p) const
{
  return !((*this) == p);
}
1422 
1423 
// In-place element-wise addition.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<rank_, dim, Number> &
                        Tensor<rank_, dim, Number>::
                        operator+=(const Tensor<rank_, dim, OtherNumber> &p)
{
  for (unsigned int i = 0; i < dim; ++i)
    values[i] += p.values[i];
  return *this;
}
1435 
1436 
// In-place element-wise subtraction.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<rank_, dim, Number> &
                        Tensor<rank_, dim, Number>::
                        operator-=(const Tensor<rank_, dim, OtherNumber> &p)
{
  for (unsigned int i = 0; i < dim; ++i)
    values[i] -= p.values[i];
  return *this;
}
1448 
1449 
// In-place scalar multiplication, applied recursively to all sub-tensors.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<rank_, dim, Number> &
  Tensor<rank_, dim, Number>::operator*=(const OtherNumber &s)
{
  for (unsigned int i = 0; i < dim; ++i)
    values[i] *= s;
  return *this;
}
1460 
1461 
namespace internal
{
  // Helpers for Tensor::operator/=. For floating-point-like product
  // types it is cheaper to compute the reciprocal once and multiply;
  // for integral types (where that would truncate) and for symbolic
  // SD::Expression numbers we must divide element-wise instead.
  namespace TensorImplementation
  {
    // Multiply-by-reciprocal variant (non-integral, non-symbolic types).
    template <int rank,
              int dim,
              typename Number,
              typename OtherNumber,
              typename std::enable_if<
                !std::is_integral<
                  typename ProductType<Number, OtherNumber>::type>::value &&
                  !std::is_same<Number, Differentiation::SD::Expression>::value,
                int>::type = 0>
    DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE void
                      division_operator(Tensor<rank, dim, Number> (&t)[dim],
                                        const OtherNumber &factor)
    {
      const Number inverse_factor = Number(1.) / factor;
      // recurse over the base objects
      for (unsigned int d = 0; d < dim; ++d)
        t[d] *= inverse_factor;
    }


    // Direct-division variant (integral or symbolic types).
    template <int rank,
              int dim,
              typename Number,
              typename OtherNumber,
              typename std::enable_if<
                std::is_integral<
                  typename ProductType<Number, OtherNumber>::type>::value ||
                  std::is_same<Number, Differentiation::SD::Expression>::value,
                int>::type = 0>
    DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE void
                      division_operator(Tensor<rank, dim, Number> (&t)[dim],
                                        const OtherNumber &factor)
    {
      // recurse over the base objects
      for (unsigned int d = 0; d < dim; ++d)
        t[d] /= factor;
    }
  } // namespace TensorImplementation
} // namespace internal
1505 
1506 
// In-place scalar division; the type-dependent strategy (reciprocal vs.
// element-wise division) lives in TensorImplementation above.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<rank_, dim, Number> &
  Tensor<rank_, dim, Number>::operator/=(const OtherNumber &s)
{
  internal::TensorImplementation::division_operator(values, s);
  return *this;
}
1516 
1517 
// Unary minus: returns a new tensor whose sub-tensors are all negated.
template <int rank_, int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  DEAL_II_CUDA_HOST_DEV Tensor<rank_, dim, Number>
  Tensor<rank_, dim, Number>::operator-() const
{
  Tensor<rank_, dim, Number> tmp;

  for (unsigned int i = 0; i < dim; ++i)
    tmp.values[i] = -values[i];

  return tmp;
}
1530 
1531 
// Frobenius norm: square root of the sum of squared entry magnitudes.
template <int rank_, int dim, typename Number>
inline typename numbers::NumberTraits<Number>::real_type
Tensor<rank_, dim, Number>::norm() const
{
  return std::sqrt(norm_square());
}
1538 
1539 
// Square of the Frobenius norm: sum of the sub-tensors' norm_square()
// values, recursing down to the rank-0 |value|^2 case.
template <int rank_, int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
  typename numbers::NumberTraits<Number>::real_type
  Tensor<rank_, dim, Number>::norm_square() const
{
  typename numbers::NumberTraits<Number>::real_type s = internal::NumberType<
    typename numbers::NumberTraits<Number>::real_type>::value(0.0);
  for (unsigned int i = 0; i < dim; ++i)
    s += values[i].norm_square();

  return s;
}
1552 
1553 
// Copy all dim^rank_ entries into a flat Vector (which must already have
// the right size), in lexicographic index order.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
inline void
Tensor<rank_, dim, Number>::unroll(Vector<OtherNumber> &result) const
{
  AssertDimension(result.size(),
                  (Utilities::fixed_power<rank_, unsigned int>(dim)));

  unsigned int index = 0;
  unroll_recursion(result, index);
}
1565 
1566 
// Recursive worker for unroll(): descend into each sub-tensor; the
// rank-0 specialization writes the scalar and advances `index`.
template <int rank_, int dim, typename Number>
template <typename OtherNumber>
inline void
Tensor<rank_, dim, Number>::unroll_recursion(Vector<OtherNumber> &result,
                                             unsigned int &       index) const
{
  for (unsigned int i = 0; i < dim; ++i)
    values[i].unroll_recursion(result, index);
}
1576 
1577 
1578 template <int rank_, int dim, typename Number>
1579 DEAL_II_CONSTEXPR inline unsigned int
1580 Tensor<rank_, dim, Number>::component_to_unrolled_index(
1581   const TableIndices<rank_> &indices)
1582 {
1583   unsigned int index = 0;
1584   for (int r = 0; r < rank_; ++r)
1585     index = index * dim + indices[r];
1586 
1587   return index;
1588 }
1589 
1590 
1591 
namespace internal
{
  // unrolled_to_component_indices is instantiated from DataOut for dim==0
  // and rank=2. Make sure we don't have compiler warnings.
  //
  // mod<dim>/div<dim> wrap `% dim` and `/ dim`; the dim==0
  // specializations avoid a division by zero in code that is
  // instantiated but must never actually be executed.

  template <int dim>
  inline DEAL_II_CONSTEXPR unsigned int
  mod(const unsigned int x)
  {
    return x % dim;
  }

  template <>
  inline unsigned int
  mod<0>(const unsigned int x)
  {
    Assert(false, ExcInternalError());
    return x;
  }

  template <int dim>
  inline DEAL_II_CONSTEXPR unsigned int
  div(const unsigned int x)
  {
    return x / dim;
  }

  template <>
  inline unsigned int
  div<0>(const unsigned int x)
  {
    Assert(false, ExcInternalError());
    return x;
  }

} // namespace internal
1628 
1629 
1630 
// Inverse of component_to_unrolled_index(): extract the base-`dim`
// digits of `i`, least significant first, filling the index array from
// the last position backwards.
template <int rank_, int dim, typename Number>
DEAL_II_CONSTEXPR inline TableIndices<rank_>
Tensor<rank_, dim, Number>::unrolled_to_component_indices(const unsigned int i)
{
  AssertIndexRange(i, n_independent_components);

  TableIndices<rank_> indices;

  unsigned int remainder = i;
  for (int r = rank_ - 1; r >= 0; --r)
    {
      indices[r] = internal::mod<dim>(remainder);
      remainder  = internal::div<dim>(remainder);
    }
  Assert(remainder == 0, ExcInternalError());

  return indices;
}
1649 
1650 
// Reset all entries to zero (via NumberType, so AD number types are
// zero-initialized correctly).
template <int rank_, int dim, typename Number>
DEAL_II_CONSTEXPR inline void
Tensor<rank_, dim, Number>::clear()
{
  for (unsigned int i = 0; i < dim; ++i)
    values[i] = internal::NumberType<Number>::value(0.0);
}
1658 
1659 
// Memory footprint of one object; valid because the class holds its
// entries by value and has no dynamically allocated state.
template <int rank_, int dim, typename Number>
constexpr std::size_t
Tensor<rank_, dim, Number>::memory_consumption()
{
  return sizeof(Tensor<rank_, dim, Number>);
}
1666 
1667 
// Boost.Serialization support: (de)serialize the nested values array.
// The version argument is unused.
template <int rank_, int dim, typename Number>
template <class Archive>
inline void
Tensor<rank_, dim, Number>::serialize(Archive &ar, const unsigned int)
{
  ar &values;
}
1675 
1676 
// Out-of-class definition of the static constexpr member; required
// pre-C++17 whenever the member is odr-used.
template <int rank_, int dim, typename Number>
constexpr unsigned int Tensor<rank_, dim, Number>::n_independent_components;
1679 
1680 #endif // DOXYGEN
1681 
1682 /* ----------------- Non-member functions operating on tensors. ------------ */
1683 
1684 /**
1685  * @name Output functions for Tensor objects
1686  */
1687 //@{
1688 
1689 /**
1690  * Output operator for tensors. Print the elements consecutively, with a space
1691  * in between, two spaces between rank 1 subtensors, three between rank 2 and
1692  * so on.
1693  *
1694  * @relatesalso Tensor
1695  */
1696 template <int rank_, int dim, typename Number>
1697 inline std::ostream &
1698 operator<<(std::ostream &out, const Tensor<rank_, dim, Number> &p)
1699 {
1700   for (unsigned int i = 0; i < dim; ++i)
1701     {
1702       out << p[i];
1703       if (i != dim - 1)
1704         out << ' ';
1705     }
1706 
1707   return out;
1708 }
1709 
1710 
1711 /**
1712  * Output operator for tensors of rank 0. Since such tensors are scalars, we
1713  * simply print this one value.
1714  *
1715  * @relatesalso Tensor
1716  */
// Print a rank-0 tensor by converting it to its underlying scalar via
// the `operator const Number &` conversion.
template <int dim, typename Number>
inline std::ostream &
operator<<(std::ostream &out, const Tensor<0, dim, Number> &p)
{
  out << static_cast<const Number &>(p);
  return out;
}
1724 
1725 
1726 //@}
1727 /**
1728  * @name Vector space operations on Tensor objects:
1729  */
1730 //@{
1731 
1732 
1733 /**
1734  * Scalar multiplication of a tensor of rank 0 with an object from the left.
1735  *
1736  * This function unwraps the underlying @p Number stored in the Tensor and
1737  * multiplies @p object with it.
1738  *
1739  * @note This function can also be used in CUDA device code.
1740  *
1741  * @relatesalso Tensor
1742  */
// Left scalar multiplication of a rank-0 tensor: unwrap the stored
// scalar and return the plain product-type value (not a Tensor).
template <int dim, typename Number, typename Other>
DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE
  typename ProductType<Other, Number>::type
  operator*(const Other &object, const Tensor<0, dim, Number> &t)
{
  return object * static_cast<const Number &>(t);
}
1750 
1751 
1752 
1753 /**
1754  * Scalar multiplication of a tensor of rank 0 with an object from the right.
1755  *
1756  * This function unwraps the underlying @p Number stored in the Tensor and
1757  * multiplies @p object with it.
1758  *
1759  * @note This function can also be used in CUDA device code.
1760  *
1761  * @relatesalso Tensor
1762  */
// Right scalar multiplication of a rank-0 tensor; mirror image of the
// overload above.
template <int dim, typename Number, typename Other>
DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE
  typename ProductType<Number, Other>::type
  operator*(const Tensor<0, dim, Number> &t, const Other &object)
{
  return static_cast<const Number &>(t) * object;
}
1770 
1771 
1772 /**
1773  * Scalar multiplication of two tensors of rank 0.
1774  *
1775  * This function unwraps the underlying objects of type @p Number and @p
1776  * OtherNumber that are stored within the Tensor and multiplies them. It
1777  * returns an unwrapped number of product type.
1778  *
1779  * @note This function can also be used in CUDA device code.
1780  *
1781  * @relatesalso Tensor
1782  */
// Product of two rank-0 tensors: both operands are unwrapped, so the
// result is a plain scalar of the product type.
template <int dim, typename Number, typename OtherNumber>
DEAL_II_CUDA_HOST_DEV constexpr DEAL_II_ALWAYS_INLINE
  typename ProductType<Number, OtherNumber>::type
  operator*(const Tensor<0, dim, Number> &     src1,
            const Tensor<0, dim, OtherNumber> &src2)
{
  return static_cast<const Number &>(src1) *
         static_cast<const OtherNumber &>(src2);
}
1792 
1793 
1794 /**
1795  * Division of a tensor of rank 0 by a scalar number.
1796  *
1797  * @note This function can also be used in CUDA device code.
1798  *
1799  * @relatesalso Tensor
1800  */
// Divide a rank-0 tensor by a scalar. EnableIfScalar restricts the
// divisor to scalar number types; the result is wrapped back into a
// Tensor<0,...> via its converting constructor.
template <int dim, typename Number, typename OtherNumber>
DEAL_II_CUDA_HOST_DEV constexpr DEAL_II_ALWAYS_INLINE
  Tensor<0,
         dim,
         typename ProductType<Number,
                              typename EnableIfScalar<OtherNumber>::type>::type>
  operator/(const Tensor<0, dim, Number> &t, const OtherNumber &factor)
{
  return static_cast<const Number &>(t) / factor;
}
1811 
1812 
1813 /**
1814  * Add two tensors of rank 0.
1815  *
1816  * @note This function can also be used in CUDA device code.
1817  *
1818  * @relatesalso Tensor
1819  */
// Sum of two rank-0 tensors, returned as a Tensor over the product type.
template <int dim, typename Number, typename OtherNumber>
constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
                                Tensor<0, dim, typename ProductType<Number, OtherNumber>::type>
                                operator+(const Tensor<0, dim, Number> &     p,
            const Tensor<0, dim, OtherNumber> &q)
{
  return static_cast<const Number &>(p) + static_cast<const OtherNumber &>(q);
}
1828 
1829 
1830 /**
1831  * Subtract two tensors of rank 0.
1832  *
1833  * @note This function can also be used in CUDA device code.
1834  *
1835  * @relatesalso Tensor
1836  */
1837 template <int dim, typename Number, typename OtherNumber>
1838 constexpr DEAL_II_ALWAYS_INLINE DEAL_II_CUDA_HOST_DEV
1839                                 Tensor<0, dim, typename ProductType<Number, OtherNumber>::type>
1840                                 operator-(const Tensor<0, dim, Number> &     p,
1841             const Tensor<0, dim, OtherNumber> &q)
1842 {
1843   return static_cast<const Number &>(p) - static_cast<const OtherNumber &>(q);
1844 }
1845 
1846 
1847 /**
1848  * Multiplication of a tensor of general rank with a scalar number from the
1849  * right.
1850  *
1851  * Only multiplication with a scalar number type (i.e., a floating point
1852  * number, a complex floating point number, etc.) is allowed, see the
1853  * documentation of EnableIfScalar for details.
1854  *
1855  * @note This function can also be used in CUDA device code.
1856  *
1857  * @relatesalso Tensor
1858  */
1859 template <int rank, int dim, typename Number, typename OtherNumber>
1860 DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE
1861                   Tensor<rank,
1862          dim,
1863          typename ProductType<Number,
1864                               typename EnableIfScalar<OtherNumber>::type>::type>
1865                   operator*(const Tensor<rank, dim, Number> &t, const OtherNumber &factor)
1866 {
1867   // recurse over the base objects
1868   Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type> tt;
1869   for (unsigned int d = 0; d < dim; ++d)
1870     tt[d] = t[d] * factor;
1871   return tt;
1872 }
1873 
1874 
1875 /**
1876  * Multiplication of a tensor of general rank with a scalar number from the
1877  * left.
1878  *
1879  * Only multiplication with a scalar number type (i.e., a floating point
1880  * number, a complex floating point number, etc.) is allowed, see the
1881  * documentation of EnableIfScalar for details.
1882  *
1883  * @note This function can also be used in CUDA device code.
1884  *
1885  * @relatesalso Tensor
1886  */
template <int rank, int dim, typename Number, typename OtherNumber>
DEAL_II_CUDA_HOST_DEV DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
                      Tensor<rank,
         dim,
         typename ProductType<typename EnableIfScalar<Number>::type,
                              OtherNumber>::type>
                      operator*(const Number &factor, const Tensor<rank, dim, OtherNumber> &t)
{
  // Scalar multiplication is commutative for the supported number
  // types, so simply forward to the tensor-times-scalar operator
  // defined above.
  return t * factor;
}
1898 
1899 
1900 namespace internal
1901 {
1902   namespace TensorImplementation
1903   {
1904     template <int rank,
1905               int dim,
1906               typename Number,
1907               typename OtherNumber,
1908               typename std::enable_if<
1909                 !std::is_integral<
1910                   typename ProductType<Number, OtherNumber>::type>::value,
1911                 int>::type = 0>
1912     DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE
1913                       Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type>
1914                       division_operator(const Tensor<rank, dim, Number> &t,
1915                                         const OtherNumber &              factor)
1916     {
1917       Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type> tt;
1918       const Number inverse_factor = Number(1.) / factor;
1919       // recurse over the base objects
1920       for (unsigned int d = 0; d < dim; ++d)
1921         tt[d] = t[d] * inverse_factor;
1922       return tt;
1923     }
1924 
1925 
1926     template <int rank,
1927               int dim,
1928               typename Number,
1929               typename OtherNumber,
1930               typename std::enable_if<
1931                 std::is_integral<
1932                   typename ProductType<Number, OtherNumber>::type>::value,
1933                 int>::type = 0>
1934     DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE
1935                       Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type>
1936                       division_operator(const Tensor<rank, dim, Number> &t,
1937                                         const OtherNumber &              factor)
1938     {
1939       Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type> tt;
1940       // recurse over the base objects
1941       for (unsigned int d = 0; d < dim; ++d)
1942         tt[d] = t[d] / factor;
1943       return tt;
1944     }
1945   } // namespace TensorImplementation
1946 } // namespace internal
1947 
1948 
1949 /**
1950  * Division of a tensor of general rank with a scalar number. See the
1951  * discussion on operator*() above for more information about template
1952  * arguments and the return type.
1953  *
1954  * @note This function can also be used in CUDA device code.
1955  *
1956  * @relatesalso Tensor
1957  */
template <int rank, int dim, typename Number, typename OtherNumber>
DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE
                  Tensor<rank,
         dim,
         typename ProductType<Number,
                              typename EnableIfScalar<OtherNumber>::type>::type>
                  operator/(const Tensor<rank, dim, Number> &t, const OtherNumber &factor)
{
  // Forward to the overload selected by the product type: integral
  // types divide entry by entry, all other types multiply each entry
  // by the reciprocal of the factor (one division instead of many).
  return internal::TensorImplementation::division_operator(t, factor);
}
1968 
1969 
1970 /**
1971  * Addition of two tensors of general rank.
1972  *
1973  * @tparam rank The rank of both tensors.
1974  *
1975  * @note This function can also be used in CUDA device code.
1976  *
1977  * @relatesalso Tensor
1978  */
1979 template <int rank, int dim, typename Number, typename OtherNumber>
1980 DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE
1981                   Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type>
1982                   operator+(const Tensor<rank, dim, Number> &     p,
1983             const Tensor<rank, dim, OtherNumber> &q)
1984 {
1985   Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type> tmp(p);
1986 
1987   for (unsigned int i = 0; i < dim; ++i)
1988     tmp[i] += q[i];
1989 
1990   return tmp;
1991 }
1992 
1993 
1994 /**
1995  * Subtraction of two tensors of general rank.
1996  *
1997  * @tparam rank The rank of both tensors.
1998  *
1999  * @note This function can also be used in CUDA device code.
2000  *
2001  * @relatesalso Tensor
2002  */
2003 template <int rank, int dim, typename Number, typename OtherNumber>
2004 DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline DEAL_II_ALWAYS_INLINE
2005                   Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type>
2006                   operator-(const Tensor<rank, dim, Number> &     p,
2007             const Tensor<rank, dim, OtherNumber> &q)
2008 {
2009   Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type> tmp(p);
2010 
2011   for (unsigned int i = 0; i < dim; ++i)
2012     tmp[i] -= q[i];
2013 
2014   return tmp;
2015 }
2016 
2017 /**
2018  * Entrywise multiplication of two tensor objects of rank 0 (i.e. the
2019  * multiplication of two scalar values).
2020  *
2021  * @relatesalso Tensor
2022  */
2023 template <int dim, typename Number, typename OtherNumber>
2024 inline DEAL_II_CONSTEXPR DEAL_II_ALWAYS_INLINE
2025                          Tensor<0, dim, typename ProductType<Number, OtherNumber>::type>
2026                          schur_product(const Tensor<0, dim, Number> &     src1,
2027                                        const Tensor<0, dim, OtherNumber> &src2)
2028 {
2029   Tensor<0, dim, typename ProductType<Number, OtherNumber>::type> tmp(src1);
2030 
2031   tmp *= src2;
2032 
2033   return tmp;
2034 }
2035 
2036 /**
2037  * Entrywise multiplication of two tensor objects of general rank.
2038  *
2039  * This multiplication is also called "Hadamard-product" (c.f.
2040  * https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a
2041  * new tensor of size <rank, dim>:
2042  * @f[
2043  *   \text{result}_{i, j}
2044  *   = \text{left}_{i, j}\circ
2045  *     \text{right}_{i, j}
2046  * @f]
2047  *
2048  * @tparam rank The rank of both tensors.
2049  *
2050  * @relatesalso Tensor
2051  */
2052 template <int rank, int dim, typename Number, typename OtherNumber>
2053 inline DEAL_II_CONSTEXPR DEAL_II_ALWAYS_INLINE
2054                          Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type>
2055                          schur_product(const Tensor<rank, dim, Number> &     src1,
2056                                        const Tensor<rank, dim, OtherNumber> &src2)
2057 {
2058   Tensor<rank, dim, typename ProductType<Number, OtherNumber>::type> tmp;
2059 
2060   for (unsigned int i = 0; i < dim; ++i)
2061     tmp[i] = schur_product(Tensor<rank - 1, dim, Number>(src1[i]),
2062                            Tensor<rank - 1, dim, OtherNumber>(src2[i]));
2063 
2064   return tmp;
2065 }
2066 
2067 //@}
2068 /**
2069  * @name Contraction operations and the outer product for tensor objects
2070  */
2071 //@{
2072 
2073 
2074 /**
2075  * The dot product (single contraction) for tensors: Return a tensor of rank
2076  * $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last
2077  * index of a tensor @p src1 of rank @p rank_1 with the first index of a
2078  * tensor @p src2 of rank @p rank_2:
2079  * @f[
2080  *   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
2081  *   = \sum_{k}
2082  *     \text{left}_{i_1,\ldots,i_{r1}, k}
2083  *     \text{right}_{k, j_1,\ldots,j_{r2}}
2084  * @f]
2085  *
2086  * @note For the Tensor class, the multiplication operator only performs a
2087  * contraction over a single pair of indices. This is in contrast to the
2088  * multiplication operator for SymmetricTensor, which does the double
2089  * contraction.
2090  *
2091  * @note In case the contraction yields a tensor of rank 0 the scalar number
2092  * is returned as an unwrapped number type.
2093  *
2094  * @relatesalso Tensor
2095  */
template <int rank_1,
          int rank_2,
          int dim,
          typename Number,
          typename OtherNumber,
          typename = typename std::enable_if<rank_1 >= 1 && rank_2 >= 1>::type>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  typename Tensor<rank_1 + rank_2 - 2,
                  dim,
                  typename ProductType<Number, OtherNumber>::type>::tensor_type
  operator*(const Tensor<rank_1, dim, Number> &     src1,
            const Tensor<rank_2, dim, OtherNumber> &src2)
{
  // Value-initialize the result so the contraction below can
  // accumulate into it. For rank_1 + rank_2 == 2 the tensor_type alias
  // is the unwrapped scalar number type itself.
  typename Tensor<rank_1 + rank_2 - 2,
                  dim,
                  typename ProductType<Number, OtherNumber>::type>::tensor_type
    result{};

  // Present src2 through a view that moves its first index to the last
  // position, so that the generic contraction routine ends up
  // contracting the last index of src1 with the first index of src2.
  TensorAccessors::internal::
    ReorderedIndexView<0, rank_2, const Tensor<rank_2, dim, OtherNumber>>
      reordered = TensorAccessors::reordered_index_view<0, rank_2>(src2);
  TensorAccessors::contract<1, rank_1, rank_2, dim>(result, src1, reordered);

  return result;
}
2121 
2122 
2123 /**
2124  * Generic contraction of a pair of indices of two tensors of arbitrary rank:
2125  * Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the
2126  * contraction of index @p index_1 of a tensor @p src1 of rank @p rank_1 with
2127  * the index @p index_2 of a tensor @p src2 of rank @p rank_2:
2128  * @f[
2129  *   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
2130  *   = \sum_{k}
2131  *     \text{left}_{i_1,\ldots,k,\ldots,i_{r1}}
2132  *     \text{right}_{j_1,\ldots,k,\ldots,j_{r2}}
2133  * @f]
2134  *
2135  * If for example the first index (<code>index_1==0</code>) of a tensor
2136  * <code>t1</code> shall be contracted with the third index
2137  * (<code>index_2==2</code>) of a tensor <code>t2</code>, this function should
2138  * be invoked as
2139  * @code
2140  *   contract<0, 2>(t1, t2);
2141  * @endcode
2142  *
2143  * @note The position of the index is counted from 0, i.e.,
2144  * $0\le\text{index}_i<\text{range}_i$.
2145  *
2146  * @note In case the contraction yields a tensor of rank 0 the scalar number
2147  * is returned as an unwrapped number type.
2148  *
2149  * @relatesalso Tensor
2150  */
template <int index_1,
          int index_2,
          int rank_1,
          int rank_2,
          int dim,
          typename Number,
          typename OtherNumber>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
  typename Tensor<rank_1 + rank_2 - 2,
                  dim,
                  typename ProductType<Number, OtherNumber>::type>::tensor_type
  contract(const Tensor<rank_1, dim, Number> &     src1,
           const Tensor<rank_2, dim, OtherNumber> &src2)
{
  Assert(0 <= index_1 && index_1 < rank_1,
         ExcMessage(
           "The specified index_1 must lie within the range [0,rank_1)"));
  Assert(0 <= index_2 && index_2 < rank_2,
         ExcMessage(
           "The specified index_2 must lie within the range [0,rank_2)"));

  using namespace TensorAccessors;
  using namespace TensorAccessors::internal;

  // Reorder index_1 to the end of src1: the generic contraction below
  // always contracts over the trailing index of both of its arguments,
  // so both requested indices are first moved to the back via
  // (zero-cost) reordered views.
  ReorderedIndexView<index_1, rank_1, const Tensor<rank_1, dim, Number>>
    reord_01 = reordered_index_view<index_1, rank_1>(src1);

  // Reorder index_2 to the end of src2:
  ReorderedIndexView<index_2, rank_2, const Tensor<rank_2, dim, OtherNumber>>
    reord_02 = reordered_index_view<index_2, rank_2>(src2);

  // Value-initialize the result and accumulate the single contraction
  // of the two reordered views into it.
  typename Tensor<rank_1 + rank_2 - 2,
                  dim,
                  typename ProductType<Number, OtherNumber>::type>::tensor_type
    result{};
  TensorAccessors::contract<1, rank_1, rank_2, dim>(result, reord_01, reord_02);
  return result;
}
2190 
2191 
2192 /**
2193  * Generic contraction of two pairs of indices of two tensors of arbitrary
2194  * rank: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 4)$ that is
2195  * the contraction of index @p index_1 with index @p index_2, and index @p
2196  * index_3 with index @p index_4 of a tensor @p src1 of rank @p rank_1 and a
2197  * tensor @p src2 of rank @p rank_2:
2198  * @f[
2199  *   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
2200  *   = \sum_{k, l}
2201  *     \text{left}_{i_1,\ldots,k,\ldots,l,\ldots,i_{r1}}
 *     \text{right}_{j_1,\ldots,k,\ldots,l,\ldots,j_{r2}}
2203  * @f]
2204  *
2205  * If for example the first index (<code>index_1==0</code>) shall be
2206  * contracted with the third index (<code>index_2==2</code>), and the second
2207  * index (<code>index_3==1</code>) with the first index
2208  * (<code>index_4==0</code>) of a tensor <code>t2</code>, this function should
2209  * be invoked as
2210  * @code
2211  *   double_contract<0, 2, 1, 0>(t1, t2);
2212  * @endcode
2213  *
2214  * @note The position of the index is counted from 0, i.e.,
2215  * $0\le\text{index}_i<\text{range}_i$.
2216  *
2217  * @note In case the contraction yields a tensor of rank 0 the scalar number
2218  * is returned as an unwrapped number type.
2219  *
2220  * @relatesalso Tensor
2221  */
template <int index_1,
          int index_2,
          int index_3,
          int index_4,
          int rank_1,
          int rank_2,
          int dim,
          typename Number,
          typename OtherNumber>
DEAL_II_CONSTEXPR inline
  typename Tensor<rank_1 + rank_2 - 4,
                  dim,
                  typename ProductType<Number, OtherNumber>::type>::tensor_type
  double_contract(const Tensor<rank_1, dim, Number> &     src1,
                  const Tensor<rank_2, dim, OtherNumber> &src2)
{
  Assert(0 <= index_1 && index_1 < rank_1,
         ExcMessage(
           "The specified index_1 must lie within the range [0,rank_1)"));
  Assert(0 <= index_3 && index_3 < rank_1,
         ExcMessage(
           "The specified index_3 must lie within the range [0,rank_1)"));
  Assert(index_1 != index_3,
         ExcMessage("index_1 and index_3 must not be the same"));
  Assert(0 <= index_2 && index_2 < rank_2,
         ExcMessage(
           "The specified index_2 must lie within the range [0,rank_2)"));
  Assert(0 <= index_4 && index_4 < rank_2,
         ExcMessage(
           "The specified index_4 must lie within the range [0,rank_2)"));
  Assert(index_2 != index_4,
         ExcMessage("index_2 and index_4 must not be the same"));

  using namespace TensorAccessors;
  using namespace TensorAccessors::internal;

  // The strategy is the same as in contract() above: successively move
  // the indices to be contracted to the trailing positions of both
  // arguments via (zero-cost) reordered views, then contract over the
  // last two index pairs.

  // Reorder index_1 to the end of src1:
  ReorderedIndexView<index_1, rank_1, const Tensor<rank_1, dim, Number>>
    reord_1 = TensorAccessors::reordered_index_view<index_1, rank_1>(src1);

  // Reorder index_2 to the end of src2:
  ReorderedIndexView<index_2, rank_2, const Tensor<rank_2, dim, OtherNumber>>
    reord_2 = TensorAccessors::reordered_index_view<index_2, rank_2>(src2);

  // Now, reorder index_3 to the end of src1. We have to make sure to
  // preserve the original ordering: index_1 has been removed. If
  // index_3 > index_1, we have to use (index_3 - 1) instead.
  // (The statement below is a single call
  // reordered_index_view<cond ? a : b, rank_1>(reord_1); clang-format
  // merely breaks the explicit template arguments unusually.)
  ReorderedIndexView<
    (index_3 < index_1 ? index_3 : index_3 - 1),
    rank_1,
    ReorderedIndexView<index_1, rank_1, const Tensor<rank_1, dim, Number>>>
    reord_3 =
      TensorAccessors::reordered_index_view < index_3 < index_1 ? index_3 :
                                                                  index_3 - 1,
    rank_1 > (reord_1);

  // Now, reorder index_4 to the end of src2. We have to make sure to
  // preserve the original ordering: index_2 has been removed. If
  // index_4 > index_2, we have to use (index_4 - 1) instead:
  ReorderedIndexView<
    (index_4 < index_2 ? index_4 : index_4 - 1),
    rank_2,
    ReorderedIndexView<index_2, rank_2, const Tensor<rank_2, dim, OtherNumber>>>
    reord_4 =
      TensorAccessors::reordered_index_view < index_4 < index_2 ? index_4 :
                                                                  index_4 - 1,
    rank_2 > (reord_2);

  // Value-initialize the result and accumulate the double contraction
  // of the fully reordered views into it.
  typename Tensor<rank_1 + rank_2 - 4,
                  dim,
                  typename ProductType<Number, OtherNumber>::type>::tensor_type
    result{};
  TensorAccessors::contract<2, rank_1, rank_2, dim>(result, reord_3, reord_4);
  return result;
}
2297 
2298 
2299 /**
2300  * The scalar product, or (generalized) Frobenius inner product of two tensors
2301  * of equal rank: Return a scalar number that is the result of a full
2302  * contraction of a tensor @p left and @p right:
2303  * @f[
2304  *   \sum_{i_1,\ldots,i_r}
2305  *   \text{left}_{i_1,\ldots,i_r}
2306  *   \text{right}_{i_1,\ldots,i_r}
2307  * @f]
2308  *
2309  * @relatesalso Tensor
2310  */
2311 template <int rank, int dim, typename Number, typename OtherNumber>
2312 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
2313   typename ProductType<Number, OtherNumber>::type
2314   scalar_product(const Tensor<rank, dim, Number> &     left,
2315                  const Tensor<rank, dim, OtherNumber> &right)
2316 {
2317   typename ProductType<Number, OtherNumber>::type result{};
2318   TensorAccessors::contract<rank, rank, rank, dim>(result, left, right);
2319   return result;
2320 }
2321 
2322 
2323 /**
2324  * Full contraction of three tensors: Return a scalar number that is the
2325  * result of a full contraction of a tensor @p left of rank @p rank_1, a
2326  * tensor @p middle of rank $(\text{rank}_1+\text{rank}_2)$ and a tensor @p
2327  * right of rank @p rank_2:
2328  * @f[
2329  *   \sum_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
2330  *   \text{left}_{i_1,\ldots,i_{r1}}
2331  *   \text{middle}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
2332  *   \text{right}_{j_1,\ldots,j_{r2}}
2333  * @f]
2334  *
2335  * @note Each of the three input tensors can be either a Tensor or
2336  * SymmetricTensor.
2337  *
2338  * @relatesalso Tensor
2339  */
2340 template <template <int, int, typename> class TensorT1,
2341           template <int, int, typename> class TensorT2,
2342           template <int, int, typename> class TensorT3,
2343           int rank_1,
2344           int rank_2,
2345           int dim,
2346           typename T1,
2347           typename T2,
2348           typename T3>
2349 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
2350   typename ProductType<T1, typename ProductType<T2, T3>::type>::type
2351   contract3(const TensorT1<rank_1, dim, T1> &         left,
2352             const TensorT2<rank_1 + rank_2, dim, T2> &middle,
2353             const TensorT3<rank_2, dim, T3> &         right)
2354 {
2355   using return_type =
2356     typename ProductType<T1, typename ProductType<T2, T3>::type>::type;
2357   return TensorAccessors::contract3<rank_1, rank_2, dim, return_type>(left,
2358                                                                       middle,
2359                                                                       right);
2360 }
2361 
2362 
2363 /**
2364  * The outer product of two tensors of @p rank_1 and @p rank_2: Returns a
2365  * tensor of rank $(\text{rank}_1 + \text{rank}_2)$:
2366  * @f[
2367  *   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
 *   = \text{left}_{i_1,\ldots,i_{r1}}\,\text{right}_{j_1,\ldots,j_{r2}}.
2369  * @f]
2370  *
2371  * @relatesalso Tensor
2372  */
2373 template <int rank_1,
2374           int rank_2,
2375           int dim,
2376           typename Number,
2377           typename OtherNumber>
2378 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
2379   Tensor<rank_1 + rank_2, dim, typename ProductType<Number, OtherNumber>::type>
2380   outer_product(const Tensor<rank_1, dim, Number> &     src1,
2381                 const Tensor<rank_2, dim, OtherNumber> &src2)
2382 {
2383   typename Tensor<rank_1 + rank_2,
2384                   dim,
2385                   typename ProductType<Number, OtherNumber>::type>::tensor_type
2386     result{};
2387   TensorAccessors::contract<0, rank_1, rank_2, dim>(result, src1, src2);
2388   return result;
2389 }
2390 
2391 
2392 //@}
2393 /**
2394  * @name Special operations on tensors of rank 1
2395  */
2396 //@{
2397 
2398 
2399 /**
2400  * Return the cross product in 2d. This is just a rotation by 90 degrees
2401  * clockwise to compute the outer normal from a tangential vector. This
2402  * function is defined for all space dimensions to allow for dimension
2403  * independent programming (e.g. within switches over the space dimension),
2404  * but may only be called if the actual dimension of the arguments is two
2405  * (e.g. from the <tt>dim==2</tt> case in the switch).
2406  *
2407  * @relatesalso Tensor
2408  */
2409 template <int dim, typename Number>
2410 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Tensor<1, dim, Number>
2411                                                cross_product_2d(const Tensor<1, dim, Number> &src)
2412 {
2413   Assert(dim == 2, ExcInternalError());
2414 
2415   Tensor<1, dim, Number> result;
2416 
2417   result[0] = src[1];
2418   result[1] = -src[0];
2419 
2420   return result;
2421 }
2422 
2423 
2424 /**
2425  * Return the cross product of 2 vectors in 3d. This function is defined for
2426  * all space dimensions to allow for dimension independent programming (e.g.
2427  * within switches over the space dimension), but may only be called if the
2428  * actual dimension of the arguments is three (e.g. from the <tt>dim==3</tt>
2429  * case in the switch).
2430  *
2431  * @relatesalso Tensor
2432  */
2433 template <int dim, typename Number1, typename Number2>
2434 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
2435   Tensor<1, dim, typename ProductType<Number1, Number2>::type>
2436   cross_product_3d(const Tensor<1, dim, Number1> &src1,
2437                    const Tensor<1, dim, Number2> &src2)
2438 {
2439   Assert(dim == 3, ExcInternalError());
2440 
2441   Tensor<1, dim, typename ProductType<Number1, Number2>::type> result;
2442 
2443   // avoid compiler warnings
2444   constexpr int s0 = 0 % dim;
2445   constexpr int s1 = 1 % dim;
2446   constexpr int s2 = 2 % dim;
2447 
2448   result[s0] = src1[s1] * src2[s2] - src1[s2] * src2[s1];
2449   result[s1] = src1[s2] * src2[s0] - src1[s0] * src2[s2];
2450   result[s2] = src1[s0] * src2[s1] - src1[s1] * src2[s0];
2451 
2452   return result;
2453 }
2454 
2455 
2456 //@}
2457 /**
2458  * @name Special operations on tensors of rank 2
2459  */
2460 //@{
2461 
2462 
2463 /**
 * Compute the determinant of a tensor of rank 2.
2465  *
2466  * @relatesalso Tensor
2467  */
template <int dim, typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number
                                               determinant(const Tensor<2, dim, Number> &t)
{
  // Compute the determinant using the Laplace expansion of the
  // determinant, expanding along the last row. This generic template
  // recurses on determinants of (dim-1)x(dim-1) minors until one of
  // the hard-coded specializations below takes over.
  Number det = internal::NumberType<Number>::value(0.0);

  for (unsigned int k = 0; k < dim; ++k)
    {
      // The minor belonging to entry (dim-1, k): drop the last row and
      // column k of t.
      Tensor<2, dim - 1, Number> minor;
      for (unsigned int i = 0; i < dim - 1; ++i)
        for (unsigned int j = 0; j < dim - 1; ++j)
          minor[i][j] = t[i][j < k ? j : j + 1];

      // The per-column sign (-1)^(k+1) here, combined with the global
      // prefactor (-1)^(dim+1) on the return statement below, yields
      // the usual cofactor sign (-1)^((dim-1)+k) for entry (dim-1, k).
      const Number cofactor = ((k % 2 == 0) ? -1. : 1.) * determinant(minor);

      det += t[dim - 1][k] * cofactor;
    }

  return ((dim % 2 == 0) ? 1. : -1.) * det;
}
2490 
2491 /**
2492  * Specialization for dim==1.
2493  *
2494  * @relatesalso Tensor
2495  */
template <typename Number>
constexpr DEAL_II_ALWAYS_INLINE Number
                                determinant(const Tensor<2, 1, Number> &t)
{
  // The determinant of a 1x1 matrix is its single entry.
  return t[0][0];
}
2502 
2503 /**
2504  * Specialization for dim==2.
2505  *
2506  * @relatesalso Tensor
2507  */
2508 template <typename Number>
2509 constexpr DEAL_II_ALWAYS_INLINE Number
2510                                 determinant(const Tensor<2, 2, Number> &t)
2511 {
2512   // hard-coded for efficiency reasons
2513   return t[0][0] * t[1][1] - t[1][0] * t[0][1];
2514 }
2515 
2516 /**
2517  * Specialization for dim==3.
2518  *
2519  * @relatesalso Tensor
2520  */
template <typename Number>
constexpr DEAL_II_ALWAYS_INLINE Number
                                determinant(const Tensor<2, 3, Number> &t)
{
  // Hard-coded for efficiency reasons: cofactor expansion along the
  // first row, det(t) = t[0][0]*C0 + t[0][1]*C1 + t[0][2]*C2, where
  // C0, C1, C2 are the signed 2x2 minors belonging to the first row.
  // The internal::NumberType<Number>::value() wrappers convert each
  // partial product to the Number type before subtracting.
  const Number C0 = internal::NumberType<Number>::value(t[1][1] * t[2][2]) -
                    internal::NumberType<Number>::value(t[1][2] * t[2][1]);
  const Number C1 = internal::NumberType<Number>::value(t[1][2] * t[2][0]) -
                    internal::NumberType<Number>::value(t[1][0] * t[2][2]);
  const Number C2 = internal::NumberType<Number>::value(t[1][0] * t[2][1]) -
                    internal::NumberType<Number>::value(t[1][1] * t[2][0]);
  return t[0][0] * C0 + t[0][1] * C1 + t[0][2] * C2;
}
2534 
2535 
2536 /**
2537  * Compute and return the trace of a tensor of rank 2, i.e. the sum of its
2538  * diagonal entries.
2539  *
2540  * @relatesalso Tensor
2541  */
2542 template <int dim, typename Number>
2543 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number
2544                                                trace(const Tensor<2, dim, Number> &d)
2545 {
2546   Number t = d[0][0];
2547   for (unsigned int i = 1; i < dim; ++i)
2548     t += d[i][i];
2549   return t;
2550 }
2551 
2552 
2553 /**
2554  * Compute and return the inverse of the given tensor. Since the compiler can
2555  * perform the return value optimization, and since the size of the return
2556  * object is known, it is acceptable to return the result by value, rather
2557  * than by reference as a parameter.
2558  *
2559  * @relatesalso Tensor
2560  */
template <int dim, typename Number>
DEAL_II_CONSTEXPR inline Tensor<2, dim, Number>
invert(const Tensor<2, dim, Number> &)
{
  Number return_tensor[dim][dim];

  // This generic template is only instantiated for dimensions that
  // have no closed-form implementation in this file (the dim = 1, 2, 3
  // cases are handled by the overloads further down) and always
  // throws.
  //
  // if desired, take over the
  // inversion of a 4x4 tensor
  // from the FullMatrix
  AssertThrow(false, ExcNotImplemented());

  return Tensor<2, dim, Number>(return_tensor);
}
2574 
2575 
2576 #ifndef DOXYGEN
2577 
2578 template <typename Number>
2579 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Tensor<2, 1, Number>
2580                                                invert(const Tensor<2, 1, Number> &t)
2581 {
2582   Tensor<2, 1, Number> return_tensor;
2583 
2584   return_tensor[0][0] = internal::NumberType<Number>::value(1.0 / t[0][0]);
2585 
2586   return return_tensor;
2587 }
2588 
2589 
2590 template <typename Number>
2591 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Tensor<2, 2, Number>
2592                                                invert(const Tensor<2, 2, Number> &t)
2593 {
2594   Tensor<2, 2, Number> return_tensor;
2595 
2596   const Number inv_det_t = internal::NumberType<Number>::value(
2597     1.0 / (t[0][0] * t[1][1] - t[1][0] * t[0][1]));
2598   return_tensor[0][0] = t[1][1];
2599   return_tensor[0][1] = -t[0][1];
2600   return_tensor[1][0] = -t[1][0];
2601   return_tensor[1][1] = t[0][0];
2602   return_tensor *= inv_det_t;
2603 
2604   return return_tensor;
2605 }
2606 
2607 
template <typename Number>
DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Tensor<2, 3, Number>
                                               invert(const Tensor<2, 3, Number> &t)
{
  Tensor<2, 3, Number> return_tensor;

  // Build the adjugate of t (the transposed matrix of cofactors) entry
  // by entry; each entry is a signed 2x2 minor of t. The
  // internal::NumberType<Number>::value() wrappers convert each
  // partial product to the Number type before subtracting.
  return_tensor[0][0] = internal::NumberType<Number>::value(t[1][1] * t[2][2]) -
                        internal::NumberType<Number>::value(t[1][2] * t[2][1]);
  return_tensor[0][1] = internal::NumberType<Number>::value(t[0][2] * t[2][1]) -
                        internal::NumberType<Number>::value(t[0][1] * t[2][2]);
  return_tensor[0][2] = internal::NumberType<Number>::value(t[0][1] * t[1][2]) -
                        internal::NumberType<Number>::value(t[0][2] * t[1][1]);
  return_tensor[1][0] = internal::NumberType<Number>::value(t[1][2] * t[2][0]) -
                        internal::NumberType<Number>::value(t[1][0] * t[2][2]);
  return_tensor[1][1] = internal::NumberType<Number>::value(t[0][0] * t[2][2]) -
                        internal::NumberType<Number>::value(t[0][2] * t[2][0]);
  return_tensor[1][2] = internal::NumberType<Number>::value(t[0][2] * t[1][0]) -
                        internal::NumberType<Number>::value(t[0][0] * t[1][2]);
  return_tensor[2][0] = internal::NumberType<Number>::value(t[1][0] * t[2][1]) -
                        internal::NumberType<Number>::value(t[1][1] * t[2][0]);
  return_tensor[2][1] = internal::NumberType<Number>::value(t[0][1] * t[2][0]) -
                        internal::NumberType<Number>::value(t[0][0] * t[2][1]);
  return_tensor[2][2] = internal::NumberType<Number>::value(t[0][0] * t[1][1]) -
                        internal::NumberType<Number>::value(t[0][1] * t[1][0]);
  // Divide by the determinant, computed here via cofactor expansion
  // along the first row reusing the first column of the adjugate.
  const Number inv_det_t = internal::NumberType<Number>::value(
    1.0 / (t[0][0] * return_tensor[0][0] + t[0][1] * return_tensor[1][0] +
           t[0][2] * return_tensor[2][0]));
  return_tensor *= inv_det_t;

  return return_tensor;
}
2639 
2640 #endif /* DOXYGEN */
2641 
2642 
2643 /**
2644  * Return the transpose of the given tensor.
2645  *
2646  * @relatesalso Tensor
2647  */
2648 template <int dim, typename Number>
2649 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Tensor<2, dim, Number>
2650                                                transpose(const Tensor<2, dim, Number> &t)
2651 {
2652   Tensor<2, dim, Number> tt;
2653   for (unsigned int i = 0; i < dim; ++i)
2654     {
2655       tt[i][i] = t[i][i];
2656       for (unsigned int j = i + 1; j < dim; ++j)
2657         {
2658           tt[i][j] = t[j][i];
2659           tt[j][i] = t[i][j];
2660         };
2661     }
2662   return tt;
2663 }
2664 
2665 
2666 /**
2667  * Return the adjugate of the given tensor of rank 2.
2668  * The adjugate of a tensor $\mathbf A$ is defined as
2669  * @f[
2670  *  \textrm{adj}\mathbf A
2671  *   \dealcoloneq \textrm{det}\mathbf A \; \mathbf{A}^{-1}
2672  * \; .
2673  * @f]
2674  *
2675  * @note This requires that the tensor is invertible.
2676  *
2677  * @relatesalso Tensor
2678  */
2679 template <int dim, typename Number>
2680 constexpr Tensor<2, dim, Number>
2681 adjugate(const Tensor<2, dim, Number> &t)
2682 {
2683   return determinant(t) * invert(t);
2684 }
2685 
2686 
2687 /**
2688  * Return the cofactor of the given tensor of rank 2.
2689  * The cofactor of a tensor $\mathbf A$ is defined as
2690  * @f[
2691  *  \textrm{cof}\mathbf A
2692  *   \dealcoloneq \textrm{det}\mathbf A \; \mathbf{A}^{-T}
2693  *    = \left[ \textrm{adj}\mathbf A \right]^{T} \; .
2694  * @f]
2695  *
2696  * @note This requires that the tensor is invertible.
2697  *
2698  * @relatesalso Tensor
2699  */
2700 template <int dim, typename Number>
2701 constexpr Tensor<2, dim, Number>
2702 cofactor(const Tensor<2, dim, Number> &t)
2703 {
2704   return transpose(adjugate(t));
2705 }
2706 
2707 
2708 /**
2709  * Return the nearest orthogonal matrix
2710  * $\hat {\mathbf A}=\mathbf U \mathbf{V}^T$ by
2711  * combining the products of the singular value decomposition (SVD)
2712  * ${\mathbf A}=\mathbf U  \mathbf S \mathbf V^T$ for a given input
2713  * ${\mathbf A}$, effectively replacing $\mathbf S$ with the identity matrix.
2714  *
2715  * This is a (nonlinear)
2716  * [projection
2717  * operation](https://en.wikipedia.org/wiki/Projection_(mathematics)) since when
2718  * applied twice, we have $\hat{\hat{\mathbf A}}=\hat{\mathbf A}$ as is easy to
2719  * see. (That is because the SVD of $\hat {\mathbf A}$ is simply
2720  * $\mathbf U \mathbf I \mathbf{V}^T$.) Furthermore, $\hat {\mathbf A}$ is
2721  * really an orthogonal matrix because orthogonal matrices have to satisfy
2722  * ${\hat {\mathbf A}}^T \hat {\mathbf A}={\mathbf I}$, which here implies
2723  * that
2724  * @f{align*}{
2725  *   {\hat {\mathbf A}}^T \hat {\mathbf A}
2726  *   &=
2727  *   \left(\mathbf U \mathbf{V}^T\right)^T\left(\mathbf U \mathbf{V}^T\right)
2728  *   \\
2729  *   &=
2730  *   \mathbf V \mathbf{U}^T
2731  *   \mathbf U \mathbf{V}^T
2732  *   \\
2733  *   &=
2734  *   \mathbf V \left(\mathbf{U}^T
2735  *   \mathbf U\right) \mathbf{V}^T
2736  *   \\
2737  *   &=
2738  *   \mathbf V \mathbf I \mathbf{V}^T
2739  *   \\
2740  *   &=
2741  *   \mathbf V \mathbf{V}^T
2742  *   \\
2743  *   &=
2744  *   \mathbf I
2745  * @f}
2746  * due to the fact that the $\mathbf U$ and $\mathbf V$ factors that come out
2747  * of the SVD are themselves orthogonal matrices.
2748  *
2749  * @param A The tensor for which to find the closest orthogonal tensor.
2750  * @tparam Number The type used to store the entries of the tensor.
2751  *   Must be either `float` or `double`.
2752  * @pre In order to use this function, this program must be linked with the
2753  *   LAPACK library.
2754  * @pre @p A must not be singular. This is because, conceptually, the problem
2755  *   to be solved here is trying to find a matrix $\hat{\mathbf A}$ that
2756  *   minimizes some kind of distance from $\mathbf A$ while satisfying the
2757  *   quadratic constraint
2758  *   ${\hat {\mathbf A}}^T \hat {\mathbf A}={\mathbf I}$. This is not so
2759  *   dissimilar to the kind of problem where one wants to find a vector
2760  *   $\hat{\mathbf x}\in{\mathbb R}^n$ that minimizes the quadratic objective
2761  *   function $\|\hat {\mathbf x} - \mathbf x\|^2$ for a given $\mathbf x$
2762  *   subject to the constraint $\|\mathbf x\|^2=1$ -- in other
2763  *   words, we are seeking the point $\hat{\mathbf x}$ on the unit sphere
2764  *   that is closest to $\mathbf x$. This problem has a solution for all
2765  *   $\mathbf x$ except if $\mathbf x=0$. The corresponding condition
2766  *   for the problem we are considering here is that $\mathbf A$ must not
2767  *   have a zero eigenvalue.
2768  *
2769  * @relatesalso Tensor
2770  */
// Declaration only: no definition is provided in this header, so the
// implementation is compiled elsewhere (per the @pre above, using this
// function requires linking against LAPACK).
template <int dim, typename Number>
Tensor<2, dim, Number>
project_onto_orthogonal_tensors(const Tensor<2, dim, Number> &A);
2774 
2775 
2776 /**
2777  * Return the $l_1$ norm of the given rank-2 tensor, where
2778  * $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$
2779  * (maximum of the sums over columns).
2780  *
2781  * @relatesalso Tensor
2782  */
2783 template <int dim, typename Number>
2784 inline Number
2785 l1_norm(const Tensor<2, dim, Number> &t)
2786 {
2787   Number max = internal::NumberType<Number>::value(0.0);
2788   for (unsigned int j = 0; j < dim; ++j)
2789     {
2790       Number sum = internal::NumberType<Number>::value(0.0);
2791       for (unsigned int i = 0; i < dim; ++i)
2792         sum += std::fabs(t[i][j]);
2793 
2794       if (sum > max)
2795         max = sum;
2796     }
2797 
2798   return max;
2799 }
2800 
2801 
2802 /**
2803  * Return the $l_\infty$ norm of the given rank-2 tensor, where
2804  * $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$
2805  * (maximum of the sums over rows).
2806  *
2807  * @relatesalso Tensor
2808  */
2809 template <int dim, typename Number>
2810 inline Number
2811 linfty_norm(const Tensor<2, dim, Number> &t)
2812 {
2813   Number max = internal::NumberType<Number>::value(0.0);
2814   for (unsigned int i = 0; i < dim; ++i)
2815     {
2816       Number sum = internal::NumberType<Number>::value(0.0);
2817       for (unsigned int j = 0; j < dim; ++j)
2818         sum += std::fabs(t[i][j]);
2819 
2820       if (sum > max)
2821         max = sum;
2822     }
2823 
2824   return max;
2825 }
2826 
2827 //@}
2828 
2829 
2830 #ifndef DOXYGEN
2831 
2832 
2833 #  ifdef DEAL_II_ADOLC_WITH_ADVANCED_BRANCHING
2834 
2835 // Specialization of functions for ADOL-C number types when
2836 // the advanced branching feature is used
2837 template <int dim>
2838 inline adouble
2839 l1_norm(const Tensor<2, dim, adouble> &t)
2840 {
2841   adouble max = internal::NumberType<adouble>::value(0.0);
2842   for (unsigned int j = 0; j < dim; ++j)
2843     {
2844       adouble sum = internal::NumberType<adouble>::value(0.0);
2845       for (unsigned int i = 0; i < dim; ++i)
2846         sum += std::fabs(t[i][j]);
2847 
2848       condassign(max, (sum > max), sum, max);
2849     }
2850 
2851   return max;
2852 }
2853 
2854 
2855 template <int dim>
2856 inline adouble
2857 linfty_norm(const Tensor<2, dim, adouble> &t)
2858 {
2859   adouble max = internal::NumberType<adouble>::value(0.0);
2860   for (unsigned int i = 0; i < dim; ++i)
2861     {
2862       adouble sum = internal::NumberType<adouble>::value(0.0);
2863       for (unsigned int j = 0; j < dim; ++j)
2864         sum += std::fabs(t[i][j]);
2865 
2866       condassign(max, (sum > max), sum, max);
2867     }
2868 
2869   return max;
2870 }
2871 
2872 #  endif // DEAL_II_ADOLC_WITH_ADVANCED_BRANCHING
2873 
2874 
2875 #endif // DOXYGEN
2876 
2877 DEAL_II_NAMESPACE_CLOSE
2878 
2879 #endif
2880