1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
5 // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6 //
7 // This Source Code Form is subject to the terms of the Mozilla
8 // Public License v. 2.0. If a copy of the MPL was not distributed
9 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
11 #ifndef EIGEN_XPRHELPER_H
12 #define EIGEN_XPRHELPER_H
13
14 // just a workaround because GCC seems to not really like empty structs
15 // FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
16 // so currently we simply disable this optimization for gcc 4.3
17 #if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)
18 #define EIGEN_EMPTY_STRUCT_CTOR(X) \
19 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \
20 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}
21 #else
22 #define EIGEN_EMPTY_STRUCT_CTOR(X)
23 #endif
24
25 namespace Eigen {
26
27 namespace internal {
28
29 template<typename IndexDest, typename IndexSrc>
30 EIGEN_DEVICE_FUNC
convert_index(const IndexSrc & idx)31 inline IndexDest convert_index(const IndexSrc& idx) {
32 // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
33 eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value to big for target type");
34 return IndexDest(idx);
35 }
36
37 // true if T can be considered as an integral index (i.e., and integral type or enum)
38 template<typename T> struct is_valid_index_type
39 {
40 enum { value =
41 #if EIGEN_HAS_TYPE_TRAITS
42 internal::is_integral<T>::value || std::is_enum<T>::value
43 #elif EIGEN_COMP_MSVC
44 internal::is_integral<T>::value || __is_enum(T)
45 #else
46 // without C++11, we use is_convertible to Index instead of is_integral in order to treat enums as Index.
47 internal::is_convertible<T,Index>::value && !internal::is_same<T,float>::value && !is_same<T,double>::value
48 #endif
49 };
50 };
51
// promote_scalar_arg is a helper used in operations between an expression and a scalar, like:
//    expression * scalar
// Its role is to determine how the type T of the scalar operand should be promoted given the scalar type ExprScalar of the given expression.
// The IsSupported template parameter must be provided by the caller as: internal::has_ReturnType<ScalarBinaryOpTraits<ExprScalar,T,op> >::value using the proper order for ExprScalar and T.
// Then the logic is as follows:
//  - if the operation is natively supported as defined by IsSupported, then the scalar type is not promoted, and T is returned.
//  - otherwise, NumTraits<ExprScalar>::Literal is returned if T is implicitly convertible to NumTraits<ExprScalar>::Literal AND that this does not imply a float to integer conversion.
//  - otherwise, ExprScalar is returned if T is implicitly convertible to ExprScalar AND that this does not imply a float to integer conversion.
//  - In all other cases, the promoted type is not defined, and the respective operation is thus invalid and not available (SFINAE).
template<typename ExprScalar,typename T, bool IsSupported>
struct promote_scalar_arg;

// Natively supported: keep T as-is.
template<typename S,typename T>
struct promote_scalar_arg<S,T,true>
{
  typedef T type;
};

// Recursively check safe conversion to PromotedType, and then ExprScalar if they are different.
// "Safe" means: not a real-to-integer conversion (integer T, or non-integer PromotedType).
template<typename ExprScalar,typename T,typename PromotedType,
  bool ConvertibleToLiteral = internal::is_convertible<T,PromotedType>::value,
  bool IsSafe = NumTraits<T>::IsInteger || !NumTraits<PromotedType>::IsInteger>
struct promote_scalar_arg_unsupported;

// Start recursion with NumTraits<ExprScalar>::Literal
template<typename S,typename T>
struct promote_scalar_arg<S,T,false> : promote_scalar_arg_unsupported<S,T,typename NumTraits<S>::Literal> {};

// We found a match! (convertible and safe)
template<typename S,typename T, typename PromotedType>
struct promote_scalar_arg_unsupported<S,T,PromotedType,true,true>
{
  typedef PromotedType type;
};

// No match, but no real-to-integer issues, and ExprScalar and current PromotedType are different,
// so let's try to promote to ExprScalar
template<typename ExprScalar,typename T, typename PromotedType>
struct promote_scalar_arg_unsupported<ExprScalar,T,PromotedType,false,true>
   : promote_scalar_arg_unsupported<ExprScalar,T,ExprScalar>
{};

// Unsafe real-to-integer, let's stop. (no nested "type": SFINAE kicks in)
template<typename S,typename T, typename PromotedType, bool ConvertibleToLiteral>
struct promote_scalar_arg_unsupported<S,T,PromotedType,ConvertibleToLiteral,false> {};

// T is not even convertible to ExprScalar, let's stop.
template<typename S,typename T>
struct promote_scalar_arg_unsupported<S,T,S,false,true> {};
101
// classes inheriting no_assignment_operator don't generate a default operator=.
class no_assignment_operator
{
  private:
    // Declared but intentionally never defined: any attempted assignment
    // fails at compile (or link) time.
    no_assignment_operator& operator=(const no_assignment_operator&);
  protected:
    // Copy construction and default construction/destruction remain available
    // to derived classes (macros expand to compiler-generated defaults).
    EIGEN_DEFAULT_COPY_CONSTRUCTOR(no_assignment_operator)
    EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(no_assignment_operator)
};
111
112 /** \internal return the index type with the largest number of bits */
113 template<typename I1, typename I2>
114 struct promote_index_type
115 {
116 typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
117 };
118
119 /** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
120 * can be accessed using value() and setValue().
121 * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
122 */
123 template<typename T, int Value> class variable_if_dynamic
124 {
125 public:
126 EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
127 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
128 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
129 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
130 };
131
132 template<typename T> class variable_if_dynamic<T, Dynamic>
133 {
134 T m_value;
135 EIGEN_DEVICE_FUNC variable_if_dynamic() { eigen_assert(false); }
136 public:
137 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value) : m_value(value) {}
138 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; }
139 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
140 };
141
142 /** \internal like variable_if_dynamic but for DynamicIndex
143 */
144 template<typename T, int Value> class variable_if_dynamicindex
145 {
146 public:
147 EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
148 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
149 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
150 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
151 };
152
153 template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
154 {
155 T m_value;
156 EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); }
157 public:
158 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T value) : m_value(value) {}
159 EIGEN_DEVICE_FUNC T EIGEN_STRONG_INLINE value() const { return m_value; }
160 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
161 };
162
// Default functor traits: unless specialized elsewhere, a functor is assumed
// to have a moderate evaluation cost, no vectorized (packet) implementation,
// and a non-repeatable evaluation.
template<typename Func> struct functor_traits
{
  enum
  {
    Cost = 10,
    PacketAccess = false,
    IsRepeatable = false
  };
};

// Forward declaration only; specialized per scalar type by the architecture headers.
template<typename T> struct packet_traits;

// Default unpacket_traits: a plain scalar type acts as its own packet of size
// one, with no alignment requirement and no smaller "half" packet.
template<typename Packet> struct unpacket_traits
{
  typedef Packet type;
  typedef Packet half;
  enum
  {
    size = 1,
    alignment = 1
  };
};
185
// Find the widest packet type usable for a fixed size. Starting from the full
// packet, keep halving until one of the stop conditions holds:
//  - Size is Dynamic (any packet width works for runtime-sized objects), or
//  - the packet size evenly divides Size, or
//  - the packet cannot be halved further (it equals its own "half").
template<int Size, typename PacketType,
         bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>
struct find_best_packet_helper;

// Stop condition reached: current packet is the best one.
template< int Size, typename PacketType>
struct find_best_packet_helper<Size,PacketType,true>
{
  typedef PacketType type;
};

// Otherwise recurse with the half-width packet.
template<int Size, typename PacketType>
struct find_best_packet_helper<Size,PacketType,false>
{
  typedef typename find_best_packet_helper<Size,typename unpacket_traits<PacketType>::half>::type type;
};

// Entry point: start from the full-width packet of scalar type T.
template<typename T, int Size>
struct find_best_packet
{
  typedef typename find_best_packet_helper<Size,typename packet_traits<T>::type>::type type;
};
207
#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
// Find the largest power-of-two alignment (up to AlignmentBytes) that evenly
// divides ArrayBytes, halving the candidate alignment until it matches or
// falls below EIGEN_MIN_ALIGN_BYTES.
template<int ArrayBytes, int AlignmentBytes,
         bool Match     =  bool((ArrayBytes%AlignmentBytes)==0),
         bool TryHalf   =  bool(EIGEN_MIN_ALIGN_BYTES<AlignmentBytes) >
struct compute_default_alignment_helper
{
  enum { value = 0 };  // no match and cannot halve further: no static alignment
};

template<int ArrayBytes, int AlignmentBytes, bool TryHalf>
struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, true, TryHalf> // Match
{
  enum { value = AlignmentBytes };
};

template<int ArrayBytes, int AlignmentBytes>
struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, false, true> // Try-half
{
  // current packet too large, try with an half-packet
  enum { value = compute_default_alignment_helper<ArrayBytes, AlignmentBytes/2>::value };
};
#else
// If static alignment is disabled, no need to bother.
// This also avoids a division by zero in "bool Match = bool((ArrayBytes%AlignmentBytes)==0)"
template<int ArrayBytes, int AlignmentBytes>
struct compute_default_alignment_helper
{
  enum { value = 0 };
};
#endif

// Default alignment (bytes) of a plain object of Size elements of type T.
template<typename T, int Size> struct compute_default_alignment {
  enum { value = compute_default_alignment_helper<Size*sizeof(T),EIGEN_MAX_STATIC_ALIGN_BYTES>::value };
};

// Dynamic-size objects are heap allocated: use the maximal runtime alignment.
template<typename T> struct compute_default_alignment<T,Dynamic> {
  enum { value = EIGEN_MAX_ALIGN_BYTES };
};
246
// Build a well-formed Matrix type from possibly inconsistent template
// parameters: a compile-time vector gets its storage order forced to match
// its orientation (row vectors RowMajor, column vectors ColMajor), since
// Matrix rejects the mismatched combination.
template<typename _Scalar, int _Rows, int _Cols,
         int _Options = AutoAlign |
                          ( (_Rows==1 && _Cols!=1) ? RowMajor
                          : (_Cols==1 && _Rows!=1) ? ColMajor
                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
         int _MaxRows = _Rows,
         int _MaxCols = _Cols
> class make_proper_matrix_type
{
    enum {
      IsColVector = _Cols==1 && _Rows!=1,
      IsRowVector = _Rows==1 && _Cols!=1,
      // Clear the conflicting storage-order bit before setting the forced one.
      Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
              : IsRowVector ? (_Options | RowMajor) & ~ColMajor
              : _Options
    };
  public:
    typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
};
266
// Compute the traits<>::Flags value of a plain Matrix/Array type.
template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
class compute_matrix_flags
{
    enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0 };
  public:
    // FIXME currently we still have to handle DirectAccessBit at the expression level to handle DenseCoeffsBase<>
    // and then propagate this information to the evaluator's flags.
    // However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage.
    enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit };
};

// Product of the compile-time dimensions; Dynamic if either one is unknown.
template<int _Rows, int _Cols> struct size_at_compile_time
{
  enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
};

// Same, but reading the dimensions from an expression's traits.
template<typename XprType> struct size_of_xpr_at_compile_time
{
  enum { ret = size_at_compile_time<traits<XprType>::RowsAtCompileTime,traits<XprType>::ColsAtCompileTime>::ret };
};
287
/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
 * whereas eval is a const reference in the case of a matrix
 */

template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
template<typename T, typename BaseClassType, int Flags> struct plain_matrix_type_dense;

// Dense case: dispatch on the expression kind (matrix vs array).
template<typename T> struct plain_matrix_type<T,Dense>
{
  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, traits<T>::Flags>::type type;
};

// Diagonal case: the expression knows its own plain type.
template<typename T> struct plain_matrix_type<T,DiagonalShape>
{
  typedef typename T::PlainObject type;
};

// Matrix expressions evaluate into a Matrix with matching sizes and storage order.
template<typename T, int Flags> struct plain_matrix_type_dense<T,MatrixXpr,Flags>
{
  typedef Matrix<typename traits<T>::Scalar,
                traits<T>::RowsAtCompileTime,
                traits<T>::ColsAtCompileTime,
                AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
                traits<T>::MaxRowsAtCompileTime,
                traits<T>::MaxColsAtCompileTime
          > type;
};

// Array expressions evaluate into an Array with matching sizes and storage order.
template<typename T, int Flags> struct plain_matrix_type_dense<T,ArrayXpr,Flags>
{
  typedef Array<typename traits<T>::Scalar,
                traits<T>::RowsAtCompileTime,
                traits<T>::ColsAtCompileTime,
                AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
                traits<T>::MaxRowsAtCompileTime,
                traits<T>::MaxColsAtCompileTime
          > type;
};
324
325 /* eval : the return type of eval(). For matrices, this is just a const reference
326 * in order to avoid a useless copy
327 */
328
329 template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
330
331 template<typename T> struct eval<T,Dense>
332 {
333 typedef typename plain_matrix_type<T>::type type;
334 // typedef typename T::PlainObject type;
335 // typedef T::Matrix<typename traits<T>::Scalar,
336 // traits<T>::RowsAtCompileTime,
337 // traits<T>::ColsAtCompileTime,
338 // AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
339 // traits<T>::MaxRowsAtCompileTime,
340 // traits<T>::MaxColsAtCompileTime
341 // > type;
342 };
343
344 template<typename T> struct eval<T,DiagonalShape>
345 {
346 typedef typename plain_matrix_type<T>::type type;
347 };
348
349 // for matrices, no need to evaluate, just use a const reference to avoid a useless copy
350 template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
351 struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
352 {
353 typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
354 };
355
356 template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
357 struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
358 {
359 typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
360 };
361
362
/* similar to plain_matrix_type, but using the evaluator's Flags
 * (the evaluator's storage order may differ from the expression's traits) */
template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_object_eval;

template<typename T>
struct plain_object_eval<T,Dense>
{
  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, evaluator<T>::Flags>::type type;
};
371
372
/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
 */
template<typename T> struct plain_matrix_type_column_major
{
  enum { Rows = traits<T>::RowsAtCompileTime,
         Cols = traits<T>::ColsAtCompileTime,
         MaxRows = traits<T>::MaxRowsAtCompileTime,
         MaxCols = traits<T>::MaxColsAtCompileTime
  };
  // Exception: a row vector must be RowMajor (Matrix enforces that a vector's
  // storage order matches its orientation).
  typedef Matrix<typename traits<T>::Scalar,
                Rows,
                Cols,
                (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
                MaxRows,
                MaxCols
          > type;
};
390
391 /* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
392 */
393 template<typename T> struct plain_matrix_type_row_major
394 {
395 enum { Rows = traits<T>::RowsAtCompileTime,
396 Cols = traits<T>::ColsAtCompileTime,
397 MaxRows = traits<T>::MaxRowsAtCompileTime,
398 MaxCols = traits<T>::MaxColsAtCompileTime
399 };
400 typedef Matrix<typename traits<T>::Scalar,
401 Rows,
402 Cols,
403 (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
404 MaxRows,
405 MaxCols
406 > type;
407 };
408
/** \internal The reference selector for template expressions. The idea is that we don't
  * need to use references for expressions since they are light weight proxy
  * objects which should generate no copying overhead. */
template <typename T>
struct ref_selector
{
  // Heavy objects (those flagged NestByRefBit, e.g. plain matrices) are
  // nested by reference; lightweight expressions are nested by value.
  typedef typename conditional<
    bool(traits<T>::Flags & NestByRefBit),
    T const&,
    const T
  >::type type;

  // Same selection, but preserving mutability for lvalue use cases.
  typedef typename conditional<
    bool(traits<T>::Flags & NestByRefBit),
    T &,
    T
  >::type non_const_type;
};
427
/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
template<typename T1, typename T2>
struct transfer_constness
{
  typedef typename conditional<
    bool(internal::is_const<T1>::value),
    typename internal::add_const_on_value_type<T2>::type,
    T2
  >::type type;
};
438
439
// However, we still need a mechanism to detect whether an expression which is evaluated multiple time
// has to be evaluated into a temporary.
// That's the purpose of this new nested_eval helper:
/** \internal Determines how a given expression should be nested when evaluated multiple times.
  * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
  * evaluated into the bigger product expression. The choice is between nesting the expression b+c as-is, or
  * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
  * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
  * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
  *
  * \tparam T the type of the expression being nested.
  * \tparam n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
  * \tparam PlainObject the type of the temporary if needed.
  */
template<typename T, int n, typename PlainObject = typename plain_object_eval<T>::type> struct nested_eval
{
  enum {
    ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
    CoeffReadCost = evaluator<T>::CoeffReadCost,  // NOTE What if an evaluator evaluate itself into a tempory?
                                                  //      Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.
                                                  //      This situation is already taken care by the EvalBeforeNestingBit flag, which is turned ON
                                                  //      for all evaluator creating a temporary. This flag is then propagated by the parent evaluators.
                                                  //      Another solution could be to count the number of temps?
    NAsInteger = n == Dynamic ? HugeCost : n,
    // CostEval: evaluate once into a temporary (NAsInteger+1 reads of plain scalars),
    // CostNoEval: re-evaluate the nested expression at each of the n accesses.
    CostEval   = (NAsInteger+1) * ScalarReadCost + CoeffReadCost,
    CostNoEval = NAsInteger * CoeffReadCost,
    Evaluate   = (int(evaluator<T>::Flags) & EvalBeforeNestingBit) || (int(CostEval) < int(CostNoEval))
  };

  typedef typename conditional<Evaluate, PlainObject, typename ref_selector<T>::type>::type type;
};
471
472 template<typename T>
473 EIGEN_DEVICE_FUNC
474 inline T* const_cast_ptr(const T* ptr)
475 {
476 return const_cast<T*>(ptr);
477 }
478
// Select the proper dense base class (MatrixBase or ArrayBase) for an expression.
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
struct dense_xpr_base
{
  /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
};

template<typename Derived>
struct dense_xpr_base<Derived, MatrixXpr>
{
  typedef MatrixBase<Derived> type;
};

template<typename Derived>
struct dense_xpr_base<Derived, ArrayXpr>
{
  typedef ArrayBase<Derived> type;
};

// Generalization dispatching on the storage kind; only the Dense case is
// defined here (sparse modules add their own specializations).
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind, typename StorageKind = typename traits<Derived>::StorageKind>
struct generic_xpr_base;

template<typename Derived, typename XprKind>
struct generic_xpr_base<Derived, XprKind, Dense>
{
  typedef typename dense_xpr_base<Derived,XprKind>::type type;
};
505
// Return type of .cast<NewScalar>(): when the target scalar equals the current
// one, avoid the useless CwiseUnaryOp and return a const reference instead.
template<typename XprType, typename CastType> struct cast_return_type
{
  typedef typename XprType::Scalar CurrentScalarType;
  typedef typename remove_all<CastType>::type _CastType;
  typedef typename _CastType::Scalar NewScalarType;
  typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
                              const XprType&,CastType>::type type;
};
514
// promote_storage_type: common storage kind of two operands. Identical kinds
// (possibly differing only in constness) promote to the non-const kind; any
// other combination is intentionally left undefined.
template <typename A, typename B> struct promote_storage_type;

template <typename Kind> struct promote_storage_type<Kind,Kind>
{
  typedef Kind ret;
};
template <typename Kind> struct promote_storage_type<Kind, const Kind>
{
  typedef Kind ret;
};
template <typename Kind> struct promote_storage_type<const Kind, Kind>
{
  typedef Kind ret;
};
529
/** \internal Specify the "storage kind" of applying a coefficient-wise
  * binary operations between two expressions of kinds A and B respectively.
  * The template parameter Functor permits to specialize the resulting storage kind wrt to
  * the functor.
  * The default rules are as follows:
  * \code
  * A      op A      -> A
  * A      op dense  -> dense
  * dense  op B      -> dense
  * sparse op dense  -> sparse
  * dense  op sparse -> sparse
  * \endcode
  */
template <typename A, typename B, typename Functor> struct cwise_promote_storage_type;

template <typename A, typename Functor> struct cwise_promote_storage_type<A,A,Functor> { typedef A ret; };
template <typename Functor> struct cwise_promote_storage_type<Dense,Dense,Functor> { typedef Dense ret; };
template <typename A, typename Functor> struct cwise_promote_storage_type<A,Dense,Functor> { typedef Dense ret; };
template <typename B, typename Functor> struct cwise_promote_storage_type<Dense,B,Functor> { typedef Dense ret; };
template <typename Functor> struct cwise_promote_storage_type<Sparse,Dense,Functor> { typedef Sparse ret; };
template <typename Functor> struct cwise_promote_storage_type<Dense,Sparse,Functor> { typedef Sparse ret; };

// Storage order of a coefficient-wise binary result: by default the left-hand
// side's order wins; when exactly one side is sparse, the dense side's order wins.
template <typename LhsKind, typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order {
  enum { value = LhsOrder };
};

template <typename LhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<LhsKind,Sparse,LhsOrder,RhsOrder> { enum { value = RhsOrder }; };
template <typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<Sparse,RhsKind,LhsOrder,RhsOrder> { enum { value = LhsOrder }; };
template <int Order> struct cwise_promote_storage_order<Sparse,Sparse,Order,Order> { enum { value = Order }; };
559
560
/** \internal Specify the "storage kind" of multiplying an expression of kind A with kind B.
  * The template parameter ProductTag permits to specialize the resulting storage kind wrt to
  * some compile-time properties of the product: GemmProduct, GemvProduct, OuterProduct, InnerProduct.
  * The default rules are as follows:
  * \code
  *  K * K            -> K
  *  dense * K        -> dense
  *  K * dense        -> dense
  *  diag * K         -> K
  *  K * diag         -> K
  *  Perm * K         -> K
  * K * Perm          -> K
  * \endcode
  */
template <typename A, typename B, int ProductTag> struct product_promote_storage_type;

// Equal kinds, and dense-vs-anything combinations.
template <typename A, int ProductTag> struct product_promote_storage_type<A, A, ProductTag> { typedef A ret;};
template <int ProductTag> struct product_promote_storage_type<Dense, Dense, ProductTag> { typedef Dense ret;};
template <typename A, int ProductTag> struct product_promote_storage_type<A, Dense, ProductTag> { typedef Dense ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<Dense, B, ProductTag> { typedef Dense ret; };

// Diagonal factors keep the other side's kind (a diagonal scaling preserves structure).
template <typename A, int ProductTag> struct product_promote_storage_type<A, DiagonalShape, ProductTag> { typedef A ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<DiagonalShape, B, ProductTag> { typedef B ret; };
template <int ProductTag> struct product_promote_storage_type<Dense, DiagonalShape, ProductTag> { typedef Dense ret; };
template <int ProductTag> struct product_promote_storage_type<DiagonalShape, Dense, ProductTag> { typedef Dense ret; };

// Permutation factors likewise keep the other side's kind.
template <typename A, int ProductTag> struct product_promote_storage_type<A, PermutationStorage, ProductTag> { typedef A ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<PermutationStorage, B, ProductTag> { typedef B ret; };
template <int ProductTag> struct product_promote_storage_type<Dense, PermutationStorage, ProductTag> { typedef Dense ret; };
template <int ProductTag> struct product_promote_storage_type<PermutationStorage, Dense, ProductTag> { typedef Dense ret; };
591
/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
  * \tparam Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType.
  */
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_row_type
{
  // A row is stored row-major (forced by orientation; see make_proper_matrix_type).
  typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
  typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;

  // Matrix expressions map to Matrix rows, array expressions to Array rows.
  typedef typename conditional<
    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
    MatrixRowType,
    ArrayRowType
  >::type type;
};

template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_col_type
{
  // A column is stored column-major: clear any RowMajor bit inherited from the expression.
  typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
  typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;

  typedef typename conditional<
    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
    MatrixColType,
    ArrayColType
  >::type type;
};
624
// Plain column-vector type suited to store the diagonal of ExpressionType.
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_diag_type
{
  // The diagonal length is min(rows, cols); prefer Dynamic for the size but a
  // fixed bound for the max size, to keep storage statically sized when possible.
  enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
         max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
  };
  typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
  typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;

  typedef typename conditional<
    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
    MatrixDiagType,
    ArrayDiagType
  >::type type;
};

// Constant expression (CwiseNullaryOp of scalar_constant_op) matching the
// shape, storage order, and kind (matrix vs array) of Expr.
template<typename Expr,typename Scalar = typename Expr::Scalar>
struct plain_constant_type
{
  enum { Options = (traits<Expr>::Flags&RowMajorBit)?RowMajor:0 };

  typedef Array<Scalar,  traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
                Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> array_type;

  typedef Matrix<Scalar,  traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
                 Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> matrix_type;

  typedef CwiseNullaryOp<scalar_constant_op<Scalar>, const typename conditional<is_same< typename traits<Expr>::XprKind, MatrixXpr >::value, matrix_type, array_type>::type > type;
};
654
// An expression is writable if it is non-const and exposes the LvalueBit flag.
template<typename ExpressionType>
struct is_lvalue
{
  enum { value = (!bool(is_const<ExpressionType>::value)) &&
                 bool(traits<ExpressionType>::Flags & LvalueBit) };
};

// Compile-time detection of diagonal expressions; false unless the type is one
// of the known diagonal wrappers below.
template<typename T> struct is_diagonal
{ enum { ret = false }; };

template<typename T> struct is_diagonal<DiagonalBase<T> >
{ enum { ret = true }; };

template<typename T> struct is_diagonal<DiagonalWrapper<T> >
{ enum { ret = true }; };

template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
{ enum { ret = true }; };

// Combine two structural shapes; only explicitly allowed combinations are defined.
template<typename S1, typename S2> struct glue_shapes;
template<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type; };
676
// Runtime check that two expressions refer to exactly the same memory layout
// (same data pointer and same strides). Only meaningful when both sides offer
// direct storage access.
template<typename T1, typename T2>
bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<has_direct_access<T1>::ret&&has_direct_access<T2>::ret, T1>::type * = 0)
{
  return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
}

// Fallback when at least one side lacks direct access: aliasing cannot be
// established, so conservatively report false.
template<typename T1, typename T2>
bool is_same_dense(const T1 &, const T2 &, typename enable_if<!(has_direct_access<T1>::ret&&has_direct_access<T2>::ret), T1>::type * = 0)
{
  return false;
}
688
689 // Internal helper defining the cost of a scalar division for the type T.
690 // The default heuristic can be specialized for each scalar type and architecture.
691 template<typename T,bool Vectorized=false,typename EnaleIf = void>
692 struct scalar_div_cost {
693 enum { value = 8*NumTraits<T>::MulCost };
694 };
695
696 template<typename T,bool Vectorized>
697 struct scalar_div_cost<std::complex<T>, Vectorized> {
698 enum { value = 2*scalar_div_cost<T>::value
699 + 6*NumTraits<T>::MulCost
700 + 3*NumTraits<T>::AddCost
701 };
702 };
703
704
705 template<bool Vectorized>
706 struct scalar_div_cost<signed long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 24 }; };
707 template<bool Vectorized>
708 struct scalar_div_cost<unsigned long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 21 }; };
709
710
#ifdef EIGEN_DEBUG_ASSIGN
// Debug-only helpers converting assignment-kernel enums and flag bitmasks
// into human-readable strings. Only compiled when EIGEN_DEBUG_ASSIGN is
// defined, hence the plain (non-inline) definitions.
std::string demangle_traversal(int t)
{
  if(t==DefaultTraversal) return "DefaultTraversal";
  if(t==LinearTraversal) return "LinearTraversal";
  if(t==InnerVectorizedTraversal) return "InnerVectorizedTraversal";
  if(t==LinearVectorizedTraversal) return "LinearVectorizedTraversal";
  if(t==SliceVectorizedTraversal) return "SliceVectorizedTraversal";
  return "?";
}
std::string demangle_unrolling(int t)
{
  if(t==NoUnrolling) return "NoUnrolling";
  if(t==InnerUnrolling) return "InnerUnrolling";
  if(t==CompleteUnrolling) return "CompleteUnrolling";
  return "?";
}
std::string demangle_flags(int f)
{
  std::string res;
  if(f&RowMajorBit)                 res += " | RowMajor";
  if(f&PacketAccessBit)             res += " | Packet";
  if(f&LinearAccessBit)             res += " | Linear";
  if(f&LvalueBit)                   res += " | Lvalue";
  if(f&DirectAccessBit)             res += " | Direct";
  if(f&NestByRefBit)                res += " | NestByRef";
  if(f&NoPreferredStorageOrderBit)  res += " | NoPreferredStorageOrderBit";

  return res;
}
#endif
742
743 } // end namespace internal
744
745
746 /** \class ScalarBinaryOpTraits
747 * \ingroup Core_Module
748 *
749 * \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is.
750 *
751 * This class permits to control the scalar return type of any binary operation performed on two different scalar types through (partial) template specializations.
752 *
753 * For instance, let \c U1, \c U2 and \c U3 be three user defined scalar types for which most operations between instances of \c U1 and \c U2 returns an \c U3.
 * You can let %Eigen know that by defining:
755 \code
756 template<typename BinaryOp>
757 struct ScalarBinaryOpTraits<U1,U2,BinaryOp> { typedef U3 ReturnType; };
758 template<typename BinaryOp>
759 struct ScalarBinaryOpTraits<U2,U1,BinaryOp> { typedef U3 ReturnType; };
760 \endcode
761 * You can then explicitly disable some particular operations to get more explicit error messages:
762 \code
763 template<>
764 struct ScalarBinaryOpTraits<U1,U2,internal::scalar_max_op<U1,U2> > {};
765 \endcode
766 * Or customize the return type for individual operation:
767 \code
768 template<>
769 struct ScalarBinaryOpTraits<U1,U2,internal::scalar_sum_op<U1,U2> > { typedef U1 ReturnType; };
770 \endcode
771 *
772 * By default, the following generic combinations are supported:
773 <table class="manual">
774 <tr><th>ScalarA</th><th>ScalarB</th><th>BinaryOp</th><th>ReturnType</th><th>Note</th></tr>
775 <tr ><td>\c T </td><td>\c T </td><td>\c * </td><td>\c T </td><td></td></tr>
776 <tr class="alt"><td>\c NumTraits<T>::Real </td><td>\c T </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
777 <tr ><td>\c T </td><td>\c NumTraits<T>::Real </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
778 </table>
779 *
780 * \sa CwiseBinaryOp
781 */
template<typename ScalarA, typename ScalarB, typename BinaryOp=internal::scalar_product_op<ScalarA,ScalarB> >
struct ScalarBinaryOpTraits
#ifndef EIGEN_PARSED_BY_DOXYGEN
  // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class.
  : internal::scalar_product_traits<ScalarA,ScalarB>
#endif // EIGEN_PARSED_BY_DOXYGEN
{};

// Same scalar type on both sides: trivially allowed.
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T,T,BinaryOp>
{
  typedef T ReturnType;
};

// Mixing a complex type T with its real counterpart is allowed and returns T.
// The enable_if restricts these two specializations to complex T only.
template <typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T, typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, BinaryOp>
{
  typedef T ReturnType;
};
template <typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, T, BinaryOp>
{
  typedef T ReturnType;
};

// For Matrix * Permutation
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T,void,BinaryOp>
{
  typedef T ReturnType;
};

// For Permutation * Matrix
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<void,T,BinaryOp>
{
  typedef T ReturnType;
};

// for Permutation*Permutation
template<typename BinaryOp>
struct ScalarBinaryOpTraits<void,void,BinaryOp>
{
  typedef void ReturnType;
};
827
// We require Lhs and Rhs to have "compatible" scalar types.
// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
// add together a float matrix and a double matrix.
// NOTE: the macro name misspells "COMPATIBILITY"; it is kept as-is because it
// is referenced throughout the library and by user code.
#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
  EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType<ScalarBinaryOpTraits<LHS, RHS,BINOP> >::value), \
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
835
836 } // end namespace Eigen
837
838 #endif // EIGEN_XPRHELPER_H
839