1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_GENERAL_BLOCK_PANEL_H
11 #define EIGEN_GENERAL_BLOCK_PANEL_H
12 
13 
14 namespace Eigen {
15 
16 namespace internal {
17 
18 template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
19 class gebp_traits;
20 
21 
22 /** \internal \returns b if a<=0, and returns a otherwise. */
23 inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
24 {
25   return a<=0 ? b : a;
26 }
27 
28 #if EIGEN_ARCH_i386_OR_x86_64
29 const std::ptrdiff_t defaultL1CacheSize = 32*1024;
30 const std::ptrdiff_t defaultL2CacheSize = 256*1024;
31 const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;
32 #else
33 const std::ptrdiff_t defaultL1CacheSize = 16*1024;
34 const std::ptrdiff_t defaultL2CacheSize = 512*1024;
35 const std::ptrdiff_t defaultL3CacheSize = 512*1024;
36 #endif
37 
38 /** \internal */
39 struct CacheSizes {
40   CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) {
41     int l1CacheSize, l2CacheSize, l3CacheSize;
42     queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);
43     m_l1 = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize);
44     m_l2 = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize);
45     m_l3 = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize);
46   }
47 
48   std::ptrdiff_t m_l1;
49   std::ptrdiff_t m_l2;
50   std::ptrdiff_t m_l3;
51 };
52 
53 
54 /** \internal */
55 inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)
56 {
57   static CacheSizes m_cacheSizes;
58 
59   if(action==SetAction)
60   {
61     // set the cpu cache sizes (in bytes) from which all block sizes are derived
62     eigen_internal_assert(l1!=0 && l2!=0);
63     m_cacheSizes.m_l1 = *l1;
64     m_cacheSizes.m_l2 = *l2;
65     m_cacheSizes.m_l3 = *l3;
66   }
67   else if(action==GetAction)
68   {
69     eigen_internal_assert(l1!=0 && l2!=0);
70     *l1 = m_cacheSizes.m_l1;
71     *l2 = m_cacheSizes.m_l2;
72     *l3 = m_cacheSizes.m_l3;
73   }
74   else
75   {
76     eigen_internal_assert(false);
77   }
78 }
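// A minimal usage sketch (illustrative only, not part of the library itself): the public
// Eigen::setCpuCacheSizes and Eigen::l1CacheSize()/l2CacheSize() helpers are thin wrappers
// around this function:
//   std::ptrdiff_t l1, l2, l3;
//   internal::manage_caching_sizes(GetAction, &l1, &l2, &l3); // read the current sizes
//   l2 = 1024*1024;                                           // e.g. override the L2 size (in bytes)
//   internal::manage_caching_sizes(SetAction, &l1, &l2, &l3); // store them back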
79 
80 /* Helper for computeProductBlockingSizes.
81  *
82  * Given an m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
83  * this function computes the blocking size parameters along the respective dimensions
84  * for matrix products and related algorithms. The blocking sizes depend on various
85  * parameters:
86  * - the L1 and L2 cache sizes,
87  * - the register level blocking sizes defined by gebp_traits,
88  * - the number of scalars that fit into a packet (when vectorization is enabled).
89  *
90  * \sa setCpuCacheSizes */
91 
92 template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
93 void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)
94 {
95   typedef gebp_traits<LhsScalar,RhsScalar> Traits;
96 
97   // Explanations:
98   // Let's recall that the product algorithms form mc x kc vertical panels A' on the lhs and
99   // kc x nc blocks B' on the rhs. B' has to fit into L2/L3 cache. Moreover, A' is processed
100   // in small mr x kc horizontal panels, where mr is the blocking size along the m dimension
101   // at the register level. Each such small horizontal panel has to stay within the L1 cache.
102   std::ptrdiff_t l1, l2, l3;
103   manage_caching_sizes(GetAction, &l1, &l2, &l3);
104 
105   if (num_threads > 1) {
106     typedef typename Traits::ResScalar ResScalar;
107     enum {
108       kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
109       ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
110       kr = 8,
111       mr = Traits::mr,
112       nr = Traits::nr
113     };
114     // Increasing k gives us more time to prefetch the content of the "C"
115     // registers. However once the latency is hidden there is no point in
116     // increasing the value of k, so we'll cap it at 320 (value determined
117     // experimentally).
118     // To prevent k from vanishing, we make k_cache at least as big as kr
119     const Index k_cache = numext::maxi<Index>(kr, (numext::mini<Index>)((l1-ksub)/kdiv, 320));
120     if (k_cache < k) {
121       k = k_cache - (k_cache % kr);
122       eigen_internal_assert(k > 0);
123     }
124 
125     const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
126     const Index n_per_thread = numext::div_ceil(n, num_threads);
127     if (n_cache <= n_per_thread) {
128       // Don't exceed the capacity of the l2 cache.
129       eigen_internal_assert(n_cache >= static_cast<Index>(nr));
130       n = n_cache - (n_cache % nr);
131       eigen_internal_assert(n > 0);
132     } else {
133       n = (numext::mini<Index>)(n, (n_per_thread + nr - 1) - ((n_per_thread + nr - 1) % nr));
134     }
135 
136     if (l3 > l2) {
137       // l3 is shared between all cores, so we'll give each thread its own chunk of l3.
138       const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
139       const Index m_per_thread = numext::div_ceil(m, num_threads);
140       if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
141         m = m_cache - (m_cache % mr);
142         eigen_internal_assert(m > 0);
143       } else {
144         m = (numext::mini<Index>)(m, (m_per_thread + mr - 1) - ((m_per_thread + mr - 1) % mr));
145       }
146     }
147   }
148   else {
149     // In unit tests we do not want to use extra large matrices,
150     // so we artificially reduce the cache sizes to check that the blocking strategy is not flawed
151 #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
152     l1 = 9*1024;
153     l2 = 32*1024;
154     l3 = 512*1024;
155 #endif
156 
157     // Early return for small problems because the computations below are time consuming for them.
158     // Perhaps it would make more sense to consider k*n*m??
159     // Note that for very tiny problems, this function should be bypassed anyway
160     // because we use the coefficient-based implementation for them.
161     if((numext::maxi)(k,(numext::maxi)(m,n))<48)
162       return;
163 
164     typedef typename Traits::ResScalar ResScalar;
165     enum {
166       k_peeling = 8,
167       k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
168       k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
169     };
170 
171     // ---- 1st level of blocking on L1, yields kc ----
172 
173     // Blocking on the third dimension (i.e., k) is chosen so that a horizontal panel
174     // of size mr x kc of the lhs plus a vertical panel of kc x nr of the rhs both fit within the L1 cache.
175     // We also include a register-level block of the result (mr x nr).
176     // (In an ideal world only the lhs panel would stay in L1)
177     // Moreover, kc has to be a multiple of 8 to be compatible with loop peeling, leading to a maximum blocking size of:
178     const Index max_kc = numext::maxi<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);
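    // For instance (illustrative numbers only), with single precision under AVX+FMA
    // (LhsPacketSize==8, mr==3*8==24, nr==4), KcFactor==1 and l1==32KB:
    //   k_sub = 24*4*4 = 384,  k_div = 24*4 + 4*4 = 112,
    //   max_kc = ((32768-384)/112) & ~7 = 289 & ~7 = 288.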
179     const Index old_k = k;
180     if(k>max_kc)
181     {
182       // We are really blocking on the third dimension:
183       // -> reduce blocking size to make sure the last block is as large as possible
184       //    while keeping the same number of sweeps over the result.
185       k = (k%max_kc)==0 ? max_kc
186                         : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));
187 
188       eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
189     }
190 
191     // ---- 2nd level of blocking on max(L2,L3), yields nc ----
192 
193     // TODO find a reliable way to get the actual amount of cache per core to use for 2nd level blocking, that is:
194     //      actual_l2 = max(l2, l3/nb_core_sharing_l3)
195     // The number below is quite conservative: it is better to underestimate the cache size than to overestimate it.
196     // For instance, it corresponds to 6MB of L3 shared among 4 cores.
197     #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
198     const Index actual_l2 = l3;
199     #else
200     const Index actual_l2 = 1572864; // == 1.5 MB
201     #endif
202 
203     // Here, nc is chosen such that a block of kc x nc of the rhs fits within half of L2.
204     // The second half is implicitly reserved to access the result and lhs coefficients.
205     // When k<max_kc, then nc can grow arbitrarily. In practice, it seems to be fruitful
206     // to limit this growth: we bound nc to grow by a factor of at most 1.5.
207     // However, if the entire lhs block fits within L1, then we are not going to block on the rows at all,
208     // and it becomes fruitful to keep the packed rhs blocks in L1 if there is enough remaining space.
209     Index max_nc;
210     const Index lhs_bytes = m * k * sizeof(LhsScalar);
211     const Index remaining_l1 = l1- k_sub - lhs_bytes;
212     if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)
213     {
214       // L1 blocking
215       max_nc = remaining_l1 / (k*sizeof(RhsScalar));
216     }
217     else
218     {
219       // L2 blocking
220       max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
221     }
222     // WARNING Below, we assume that Traits::nr is a power of two.
223     Index nc = numext::mini<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
224     if(n>nc)
225     {
226       // We are really blocking over the columns:
227       // -> reduce blocking size to make sure the last block is as large as possible
228       //    while keeping the same number of sweeps over the packed lhs.
229       //    Here we allow one more sweep if this gives us a perfect match, thus the commented "-1"
230       n = (n%nc)==0 ? nc
231                     : (nc - Traits::nr * ((nc/*-1*/-(n%nc))/(Traits::nr*(n/nc+1))));
232     }
233     else if(old_k==k)
234     {
235       // So far, no blocking at all, i.e., kc==k, and nc==n.
236       // In this case, let's perform a blocking over the rows such that the packed lhs data is kept in cache L1/L2
237       // TODO: part of this blocking strategy is now implemented within the kernel itself, so the L1-based heuristic here should be obsolete.
238       Index problem_size = k*n*sizeof(LhsScalar);
239       Index actual_lm = actual_l2;
240       Index max_mc = m;
241       if(problem_size<=1024)
242       {
243         // the problem is small enough to be kept in L1
244         // Let's choose m such that the lhs's block fits in 1/3 of L1
245         actual_lm = l1;
246       }
247       else if(l3!=0 && problem_size<=32768)
248       {
249         // we have both L2 and L3, and the problem is small enough to be kept in L2
250         // Let's choose m such that the lhs's block fits in 1/3 of L2
251         actual_lm = l2;
252         max_mc = (numext::mini<Index>)(576,max_mc);
253       }
254       Index mc = (numext::mini<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
255       if (mc > Traits::mr) mc -= mc % Traits::mr;
256       else if (mc==0) return;
257       m = (m%mc)==0 ? mc
258                     : (mc - Traits::mr * ((mc/*-1*/-(m%mc))/(Traits::mr*(m/mc+1))));
259     }
260   }
261 }
262 
263 template <typename Index>
264 inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
265 {
266 #ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
267   if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
268     k = numext::mini<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
269     m = numext::mini<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
270     n = numext::mini<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
271     return true;
272   }
273 #else
274   EIGEN_UNUSED_VARIABLE(k)
275   EIGEN_UNUSED_VARIABLE(m)
276   EIGEN_UNUSED_VARIABLE(n)
277 #endif
278   return false;
279 }
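// For instance (illustrative only), a test build can pin the blocking sizes at compile time with
// flags such as:
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZES=1 -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K=64 \
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M=48 -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N=128
// in which case the requested k, m and n are simply clamped to these values and the
// cache-size heuristic below is bypassed.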
280 
281 /** \brief Computes the blocking parameters for a m x k times k x n matrix product
282   *
283   * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.
284   * \param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
285   * \param[in,out] n Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.
286   *
287   * Given an m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
288   * this function computes the blocking size parameters along the respective dimensions
289   * for matrix products and related algorithms.
290   *
291   * The blocking size parameters may be evaluated:
292   *   - either by a heuristic based on cache sizes;
293   *   - or using fixed prescribed values (for testing purposes).
294   *
295   * \sa setCpuCacheSizes */
296 
297 template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
298 void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
299 {
300   if (!useSpecificBlockingSizes(k, m, n)) {
301     evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor, Index>(k, m, n, num_threads);
302   }
303 }
304 
305 template<typename LhsScalar, typename RhsScalar, typename Index>
306 inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
307 {
308   computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads);
309 }
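// A minimal usage sketch (illustrative only; `depth`, `rows`, `cols` and `num_threads` are
// placeholders for the caller's problem sizes):
//   Index kc = depth, mc = rows, nc = cols;
//   internal::computeProductBlockingSizes<LhsScalar,RhsScalar>(kc, mc, nc, num_threads);
//   // kc, mc and nc now hold cache-friendly block sizes for the packing and gebp kernels.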
310 
311 #ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
312   #define CJMADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
313 #else
314 
315   // FIXME (a bit overkill maybe ?)
316 
317   template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
318     EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
319     {
320       c = cj.pmadd(a,b,c);
321     }
322   };
323 
324   template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
325     EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
326     {
327       t = b; t = cj.pmul(a,t); c = padd(c,t);
328     }
329   };
330 
331   template<typename CJ, typename A, typename B, typename C, typename T>
332   EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
333   {
334     gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
335   }
336 
337   #define CJMADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
338 //   #define CJMADD(CJ,A,B,C,T)  T = B; T = CJ.pmul(A,T); C = padd(C,T);
339 #endif
340 
341 /* Vectorization logic
342  *  real*real: unpack rhs to constant packets, ...
343  *
344  *  cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), mul to get (a_r b_r,a_i b_r) (a_r b_i,a_i b_i),
345  *          storing each res packet into two packets (2x2),
346  *          at the end combine them: swap the second and addsub them
347  *  cf*cf : same but with 2x4 blocks
348  *  cplx*real : unpack rhs to constant packets, ...
349  *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
350  */
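// Concretely (an illustrative sketch of the non-conjugated case): for one coefficient product
// (a_r + i*a_i) * (b_r + i*b_i), the packed lhs holds (a_r, a_i, ...) interleaved and the rhs
// coefficient is broadcast into the two real packets pset1(b_r) and pset1(b_i). The kernel then
// accumulates
//   first  += (a_r*b_r, a_i*b_r, ...)   and   second += (a_r*b_i, a_i*b_i, ...)
// into a DoublePacket (defined below), and acc() recombines them via pcplxflip/pconj into
//   (a_r*b_r - a_i*b_i) + i*(a_r*b_i + a_i*b_r),
// which is the "swap the second and addsub them" step mentioned above.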
351 template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
352 class gebp_traits
353 {
354 public:
355   typedef _LhsScalar LhsScalar;
356   typedef _RhsScalar RhsScalar;
357   typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
358 
359   enum {
360     ConjLhs = _ConjLhs,
361     ConjRhs = _ConjRhs,
362     Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
363     LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
364     RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
365     ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
366 
367     NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
368 
369     // register block size along the N direction must be 1 or 4
370     nr = 4,
371 
372     // register block size along the M direction (currently, this one cannot be modified)
373     default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
374 #if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
375     // we assume 16 registers
376     // See bug 992: if the scalar type is not vectorizable but EIGEN_HAS_SINGLE_INSTRUCTION_MADD is defined,
377     // then using 3*LhsPacketSize triggers non-implemented paths in syrk.
378     mr = Vectorizable ? 3*LhsPacketSize : default_mr,
379 #else
380     mr = default_mr,
381 #endif
382 
383     LhsProgress = LhsPacketSize,
384     RhsProgress = 1
385   };
386 
387   typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
388   typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
389   typedef typename packet_traits<ResScalar>::type  _ResPacket;
390 
391   typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
392   typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
393   typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
394 
395   typedef ResPacket AccPacket;
396 
397   EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
398   {
399     p = pset1<ResPacket>(ResScalar(0));
400   }
401 
402   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
403   {
404     pbroadcast4(b, b0, b1, b2, b3);
405   }
406 
407 //   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
408 //   {
409 //     pbroadcast2(b, b0, b1);
410 //   }
411 
412   template<typename RhsPacketType>
413   EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
414   {
415     dest = pset1<RhsPacketType>(*b);
416   }
417 
418   EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
419   {
420     dest = ploadquad<RhsPacket>(b);
421   }
422 
423   template<typename LhsPacketType>
424   EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const
425   {
426     dest = pload<LhsPacketType>(a);
427   }
428 
429   template<typename LhsPacketType>
430   EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
431   {
432     dest = ploadu<LhsPacketType>(a);
433   }
434 
435   template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
436   EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
437   {
438     conj_helper<LhsPacketType,RhsPacketType,ConjLhs,ConjRhs> cj;
439     // It would be a lot cleaner to call pmadd all the time. Unfortunately if we
440     // let gcc allocate the register in which to store the result of the pmul
441     // (in the case where there is no FMA) gcc fails to figure out how to avoid
442     // spilling registers.
443 #ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
444     EIGEN_UNUSED_VARIABLE(tmp);
445     c = cj.pmadd(a,b,c);
446 #else
447     tmp = b; tmp = cj.pmul(a,tmp); c = padd(c,tmp);
448 #endif
449   }
450 
451   EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
452   {
453     r = pmadd(c,alpha,r);
454   }
455 
456   template<typename ResPacketHalf>
457   EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const
458   {
459     r = pmadd(c,alpha,r);
460   }
461 
462 };
463 
464 template<typename RealScalar, bool _ConjLhs>
465 class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
466 {
467 public:
468   typedef std::complex<RealScalar> LhsScalar;
469   typedef RealScalar RhsScalar;
470   typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
471 
472   enum {
473     ConjLhs = _ConjLhs,
474     ConjRhs = false,
475     Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
476     LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
477     RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
478     ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
479 
480     NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
481     nr = 4,
482 #if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
483     // we assume 16 registers
484     mr = 3*LhsPacketSize,
485 #else
486     mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
487 #endif
488 
489     LhsProgress = LhsPacketSize,
490     RhsProgress = 1
491   };
492 
493   typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
494   typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
495   typedef typename packet_traits<ResScalar>::type  _ResPacket;
496 
497   typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
498   typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
499   typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
500 
501   typedef ResPacket AccPacket;
502 
503   EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
504   {
505     p = pset1<ResPacket>(ResScalar(0));
506   }
507 
508   EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
509   {
510     dest = pset1<RhsPacket>(*b);
511   }
512 
513   EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
514   {
515     dest = pset1<RhsPacket>(*b);
516   }
517 
518   EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
519   {
520     dest = pload<LhsPacket>(a);
521   }
522 
523   EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
524   {
525     dest = ploadu<LhsPacket>(a);
526   }
527 
528   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
529   {
530     pbroadcast4(b, b0, b1, b2, b3);
531   }
532 
533 //   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
534 //   {
535 //     pbroadcast2(b, b0, b1);
536 //   }
537 
538   EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
539   {
540     madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
541   }
542 
543   EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
544   {
545 #ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
546     EIGEN_UNUSED_VARIABLE(tmp);
547     c.v = pmadd(a.v,b,c.v);
548 #else
549     tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
550 #endif
551   }
552 
553   EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
554   {
555     c += a * b;
556   }
557 
558   EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
559   {
560     r = cj.pmadd(c,alpha,r);
561   }
562 
563 protected:
564   conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
565 };
566 
567 template<typename Packet>
568 struct DoublePacket
569 {
570   Packet first;
571   Packet second;
572 };
573 
574 template<typename Packet>
575 DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
576 {
577   DoublePacket<Packet> res;
578   res.first  = padd(a.first, b.first);
579   res.second = padd(a.second,b.second);
580   return res;
581 }
582 
583 template<typename Packet>
584 const DoublePacket<Packet>& predux_downto4(const DoublePacket<Packet> &a)
585 {
586   return a;
587 }
588 
589 template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; };
590 // template<typename Packet>
591 // DoublePacket<Packet> pmadd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
592 // {
593 //   DoublePacket<Packet> res;
594 //   res.first  = padd(a.first, b.first);
595 //   res.second = padd(a.second,b.second);
596 //   return res;
597 // }
598 
599 template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
600 class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
601 {
602 public:
603   typedef std::complex<RealScalar>  Scalar;
604   typedef std::complex<RealScalar>  LhsScalar;
605   typedef std::complex<RealScalar>  RhsScalar;
606   typedef std::complex<RealScalar>  ResScalar;
607 
608   enum {
609     ConjLhs = _ConjLhs,
610     ConjRhs = _ConjRhs,
611     Vectorizable = packet_traits<RealScalar>::Vectorizable
612                 && packet_traits<Scalar>::Vectorizable,
613     RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
614     ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
615     LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
616     RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
617 
618     // FIXME: should depend on NumberOfRegisters
619     nr = 4,
620     mr = ResPacketSize,
621 
622     LhsProgress = ResPacketSize,
623     RhsProgress = 1
624   };
625 
626   typedef typename packet_traits<RealScalar>::type RealPacket;
627   typedef typename packet_traits<Scalar>::type     ScalarPacket;
628   typedef DoublePacket<RealPacket> DoublePacketType;
629 
630   typedef typename conditional<Vectorizable,RealPacket,  Scalar>::type LhsPacket;
631   typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
632   typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
633   typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;
634 
635   EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }
636 
637   EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p)
638   {
639     p.first   = pset1<RealPacket>(RealScalar(0));
640     p.second  = pset1<RealPacket>(RealScalar(0));
641   }
642 
643   // Scalar path
644   EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
645   {
646     dest = pset1<ResPacket>(*b);
647   }
648 
649   // Vectorized path
650   EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
651   {
652     dest.first  = pset1<RealPacket>(numext::real(*b));
653     dest.second = pset1<RealPacket>(numext::imag(*b));
654   }
655 
656   EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
657   {
658     loadRhs(b,dest);
659   }
660   EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
661   {
662     eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
663     loadRhs(b,dest);
664   }
665 
666   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
667   {
668     // FIXME not sure that's the best way to implement it!
669     loadRhs(b+0, b0);
670     loadRhs(b+1, b1);
671     loadRhs(b+2, b2);
672     loadRhs(b+3, b3);
673   }
674 
675   // Vectorized path
676   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
677   {
678     // FIXME not sure that's the best way to implement it!
679     loadRhs(b+0, b0);
680     loadRhs(b+1, b1);
681   }
682 
683   // Scalar path
684   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
685   {
686     // FIXME not sure that's the best way to implement it!
687     loadRhs(b+0, b0);
688     loadRhs(b+1, b1);
689   }
690 
691   // nothing special here
692   EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
693   {
694     dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
695   }
696 
697   EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
698   {
699     dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
700   }
701 
702   EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const
703   {
704     c.first   = padd(pmul(a,b.first), c.first);
705     c.second  = padd(pmul(a,b.second),c.second);
706   }
707 
708   EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
709   {
710     c = cj.pmadd(a,b,c);
711   }
712 
713   EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }
714 
715   EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const
716   {
717     // assemble c
718     ResPacket tmp;
719     if((!ConjLhs)&&(!ConjRhs))
720     {
721       tmp = pcplxflip(pconj(ResPacket(c.second)));
722       tmp = padd(ResPacket(c.first),tmp);
723     }
724     else if((!ConjLhs)&&(ConjRhs))
725     {
726       tmp = pconj(pcplxflip(ResPacket(c.second)));
727       tmp = padd(ResPacket(c.first),tmp);
728     }
729     else if((ConjLhs)&&(!ConjRhs))
730     {
731       tmp = pcplxflip(ResPacket(c.second));
732       tmp = padd(pconj(ResPacket(c.first)),tmp);
733     }
734     else if((ConjLhs)&&(ConjRhs))
735     {
736       tmp = pcplxflip(ResPacket(c.second));
737       tmp = psub(pconj(ResPacket(c.first)),tmp);
738     }
739 
740     r = pmadd(tmp,alpha,r);
741   }
742 
743 protected:
744   conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
745 };
746 
747 template<typename RealScalar, bool _ConjRhs>
748 class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
749 {
750 public:
751   typedef std::complex<RealScalar>  Scalar;
752   typedef RealScalar  LhsScalar;
753   typedef Scalar      RhsScalar;
754   typedef Scalar      ResScalar;
755 
756   enum {
757     ConjLhs = false,
758     ConjRhs = _ConjRhs,
759     Vectorizable = packet_traits<RealScalar>::Vectorizable
760                 && packet_traits<Scalar>::Vectorizable,
761     LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
762     RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
763     ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
764 
765     NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
766     // FIXME: should depend on NumberOfRegisters
767     nr = 4,
768     mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize,
769 
770     LhsProgress = ResPacketSize,
771     RhsProgress = 1
772   };
773 
774   typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
775   typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
776   typedef typename packet_traits<ResScalar>::type  _ResPacket;
777 
778   typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
779   typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
780   typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
781 
782   typedef ResPacket AccPacket;
783 
784   EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
785   {
786     p = pset1<ResPacket>(ResScalar(0));
787   }
788 
789   EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
790   {
791     dest = pset1<RhsPacket>(*b);
792   }
793 
794   void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
795   {
796     pbroadcast4(b, b0, b1, b2, b3);
797   }
798 
799 //   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
800 //   {
801 //     // FIXME not sure that's the best way to implement it!
802 //     b0 = pload1<RhsPacket>(b+0);
803 //     b1 = pload1<RhsPacket>(b+1);
804 //   }
805 
806   EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
807   {
808     dest = ploaddup<LhsPacket>(a);
809   }
810 
811   EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
812   {
813     eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4);
814     loadRhs(b,dest);
815   }
816 
817   EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
818   {
819     dest = ploaddup<LhsPacket>(a);
820   }
821 
822   EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
823   {
824     madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
825   }
826 
827   EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
828   {
829 #ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
830     EIGEN_UNUSED_VARIABLE(tmp);
831     c.v = pmadd(a,b.v,c.v);
832 #else
833     tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
834 #endif
835 
836   }
837 
838   EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
839   {
840     c += a * b;
841   }
842 
843   EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
844   {
845     r = cj.pmadd(alpha,c,r);
846   }
847 
848 protected:
849   conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
850 };
851 
852 /* optimized GEneral packed Block * packed Panel product kernel
853  *
854  * Mixing type logic: C += A * B
855  *  |  A  |  B  | comments
856  *  |real |cplx | no vectorization yet, would require packing A with duplication
857  *  |cplx |real | easy vectorization
858  */
859 template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
860 struct gebp_kernel
861 {
862   typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
863   typedef typename Traits::ResScalar ResScalar;
864   typedef typename Traits::LhsPacket LhsPacket;
865   typedef typename Traits::RhsPacket RhsPacket;
866   typedef typename Traits::ResPacket ResPacket;
867   typedef typename Traits::AccPacket AccPacket;
868 
869   typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
870   typedef typename SwappedTraits::ResScalar SResScalar;
871   typedef typename SwappedTraits::LhsPacket SLhsPacket;
872   typedef typename SwappedTraits::RhsPacket SRhsPacket;
873   typedef typename SwappedTraits::ResPacket SResPacket;
874   typedef typename SwappedTraits::AccPacket SAccPacket;
875 
876   typedef typename DataMapper::LinearMapper LinearMapper;
877 
878   enum {
879     Vectorizable  = Traits::Vectorizable,
880     LhsProgress   = Traits::LhsProgress,
881     RhsProgress   = Traits::RhsProgress,
882     ResPacketSize = Traits::ResPacketSize
883   };
884 
885   EIGEN_DONT_INLINE
886   void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
887                   Index rows, Index depth, Index cols, ResScalar alpha,
888                   Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
889 };
890 
891 template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
892 EIGEN_DONT_INLINE
893 void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs>
894   ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
895                Index rows, Index depth, Index cols, ResScalar alpha,
896                Index strideA, Index strideB, Index offsetA, Index offsetB)
897   {
898     Traits traits;
899     SwappedTraits straits;
900 
901     if(strideA==-1) strideA = depth;
902     if(strideB==-1) strideB = depth;
903     conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
904     Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
905     const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;
906     const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;
907     const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0;
908     enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell)
909     const Index peeled_kc  = depth & ~(pk-1);
910     const Index prefetch_res_offset = 32/sizeof(ResScalar);
911 //     const Index depth2     = depth & ~1;
912 
913     //---------- Process 3 * LhsProgress rows at once ----------
914     // This corresponds to 3*LhsProgress x nr register blocks.
915     // Usually, this makes sense only with FMA
916     if(mr>=3*Traits::LhsProgress)
917     {
918       // Here, the general idea is to loop on each largest micro horizontal panel of the lhs (3*Traits::LhsProgress x depth)
919       // and on each largest micro vertical panel of the rhs (depth * nr).
920       // The blocking sizes, i.e., 'depth', have been computed so that the micro horizontal panel of the lhs fits in L1.
921       // However, if depth is too small, we can extend the number of rows of these horizontal panels.
922       // The actual number of rows is computed as follows:
923       const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function.
924       // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size
925       // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess),
926       // or because we are testing specific blocking sizes.
927       const Index actual_panel_rows = (3*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 3*LhsProgress) ));
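      // For instance (illustrative numbers only), with float under AVX+FMA (LhsProgress==8, mr==24,
      // nr==4), l1==32KB and depth==288, the ratio above is 1 and each panel keeps 3*LhsProgress==24
      // rows; with a much smaller depth, say 32, a panel grows to 24*10==240 rows.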
928       for(Index i1=0; i1<peeled_mc3; i1+=actual_panel_rows)
929       {
930         const Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc3);
931         for(Index j2=0; j2<packet_cols4; j2+=nr)
932         {
933           for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
934           {
935 
936           // We selected a 3*Traits::LhsProgress x nr micro block of res which is entirely
937           // stored into 3 x nr registers.
938 
939           const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*LhsProgress)];
940           prefetch(&blA[0]);
941 
942           // gets res block as register
943           AccPacket C0, C1, C2,  C3,
944                     C4, C5, C6,  C7,
945                     C8, C9, C10, C11;
946           traits.initAcc(C0);  traits.initAcc(C1);  traits.initAcc(C2);  traits.initAcc(C3);
947           traits.initAcc(C4);  traits.initAcc(C5);  traits.initAcc(C6);  traits.initAcc(C7);
948           traits.initAcc(C8);  traits.initAcc(C9);  traits.initAcc(C10); traits.initAcc(C11);
949 
950           LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
951           LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
952           LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
953           LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
954 
955           r0.prefetch(0);
956           r1.prefetch(0);
957           r2.prefetch(0);
958           r3.prefetch(0);
959 
960           // performs "inner" products
961           const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
962           prefetch(&blB[0]);
963           LhsPacket A0, A1;
964 
965           for(Index k=0; k<peeled_kc; k+=pk)
966           {
967             EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX4");
968             RhsPacket B_0, T0;
969             LhsPacket A2;
970 
971 #define EIGEN_GEBP_ONESTEP(K) \
972             do { \
973               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
974               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
975               internal::prefetch(blA+(3*K+16)*LhsProgress); \
976               if (EIGEN_ARCH_ARM) { internal::prefetch(blB+(4*K+16)*RhsProgress); } /* Bug 953 */ \
977               traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0);  \
978               traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1);  \
979               traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2);  \
980               traits.loadRhs(blB + (0+4*K)*Traits::RhsProgress, B_0); \
981               traits.madd(A0, B_0, C0, T0); \
982               traits.madd(A1, B_0, C4, T0); \
983               traits.madd(A2, B_0, C8, B_0); \
984               traits.loadRhs(blB + (1+4*K)*Traits::RhsProgress, B_0); \
985               traits.madd(A0, B_0, C1, T0); \
986               traits.madd(A1, B_0, C5, T0); \
987               traits.madd(A2, B_0, C9, B_0); \
988               traits.loadRhs(blB + (2+4*K)*Traits::RhsProgress, B_0); \
989               traits.madd(A0, B_0, C2,  T0); \
990               traits.madd(A1, B_0, C6,  T0); \
991               traits.madd(A2, B_0, C10, B_0); \
992               traits.loadRhs(blB + (3+4*K)*Traits::RhsProgress, B_0); \
993               traits.madd(A0, B_0, C3 , T0); \
994               traits.madd(A1, B_0, C7,  T0); \
995               traits.madd(A2, B_0, C11, B_0); \
996               EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
997             } while(false)
998 
999             internal::prefetch(blB);
1000             EIGEN_GEBP_ONESTEP(0);
1001             EIGEN_GEBP_ONESTEP(1);
1002             EIGEN_GEBP_ONESTEP(2);
1003             EIGEN_GEBP_ONESTEP(3);
1004             EIGEN_GEBP_ONESTEP(4);
1005             EIGEN_GEBP_ONESTEP(5);
1006             EIGEN_GEBP_ONESTEP(6);
1007             EIGEN_GEBP_ONESTEP(7);
1008 
1009             blB += pk*4*RhsProgress;
1010             blA += pk*3*Traits::LhsProgress;
1011 
1012             EIGEN_ASM_COMMENT("end gebp micro kernel 3pX4");
1013           }
1014           // process remaining peeled loop
1015           for(Index k=peeled_kc; k<depth; k++)
1016           {
1017             RhsPacket B_0, T0;
1018             LhsPacket A2;
1019             EIGEN_GEBP_ONESTEP(0);
1020             blB += 4*RhsProgress;
1021             blA += 3*Traits::LhsProgress;
1022           }
1023 
1024 #undef EIGEN_GEBP_ONESTEP
1025 
1026           ResPacket R0, R1, R2;
1027           ResPacket alphav = pset1<ResPacket>(alpha);
1028 
1029           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
1030           R1 = r0.loadPacket(1 * Traits::ResPacketSize);
1031           R2 = r0.loadPacket(2 * Traits::ResPacketSize);
1032           traits.acc(C0, alphav, R0);
1033           traits.acc(C4, alphav, R1);
1034           traits.acc(C8, alphav, R2);
1035           r0.storePacket(0 * Traits::ResPacketSize, R0);
1036           r0.storePacket(1 * Traits::ResPacketSize, R1);
1037           r0.storePacket(2 * Traits::ResPacketSize, R2);
1038 
1039           R0 = r1.loadPacket(0 * Traits::ResPacketSize);
1040           R1 = r1.loadPacket(1 * Traits::ResPacketSize);
1041           R2 = r1.loadPacket(2 * Traits::ResPacketSize);
1042           traits.acc(C1, alphav, R0);
1043           traits.acc(C5, alphav, R1);
1044           traits.acc(C9, alphav, R2);
1045           r1.storePacket(0 * Traits::ResPacketSize, R0);
1046           r1.storePacket(1 * Traits::ResPacketSize, R1);
1047           r1.storePacket(2 * Traits::ResPacketSize, R2);
1048 
1049           R0 = r2.loadPacket(0 * Traits::ResPacketSize);
1050           R1 = r2.loadPacket(1 * Traits::ResPacketSize);
1051           R2 = r2.loadPacket(2 * Traits::ResPacketSize);
1052           traits.acc(C2, alphav, R0);
1053           traits.acc(C6, alphav, R1);
1054           traits.acc(C10, alphav, R2);
1055           r2.storePacket(0 * Traits::ResPacketSize, R0);
1056           r2.storePacket(1 * Traits::ResPacketSize, R1);
1057           r2.storePacket(2 * Traits::ResPacketSize, R2);
1058 
1059           R0 = r3.loadPacket(0 * Traits::ResPacketSize);
1060           R1 = r3.loadPacket(1 * Traits::ResPacketSize);
1061           R2 = r3.loadPacket(2 * Traits::ResPacketSize);
1062           traits.acc(C3, alphav, R0);
1063           traits.acc(C7, alphav, R1);
1064           traits.acc(C11, alphav, R2);
1065           r3.storePacket(0 * Traits::ResPacketSize, R0);
1066           r3.storePacket(1 * Traits::ResPacketSize, R1);
1067           r3.storePacket(2 * Traits::ResPacketSize, R2);
1068           }
1069         }
1070 
1071         // Deal with remaining columns of the rhs
1072         for(Index j2=packet_cols4; j2<cols; j2++)
1073         {
1074           for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
1075           {
1076           // One column at a time
1077           const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)];
1078           prefetch(&blA[0]);
1079 
1080           // gets res block as register
1081           AccPacket C0, C4, C8;
1082           traits.initAcc(C0);
1083           traits.initAcc(C4);
1084           traits.initAcc(C8);
1085 
1086           LinearMapper r0 = res.getLinearMapper(i, j2);
1087           r0.prefetch(0);
1088 
1089           // performs "inner" products
1090           const RhsScalar* blB = &blockB[j2*strideB+offsetB];
1091           LhsPacket A0, A1, A2;
1092 
1093           for(Index k=0; k<peeled_kc; k+=pk)
1094           {
1095             EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX1");
1096             RhsPacket B_0;
1097 #define EIGEN_GEBGP_ONESTEP(K) \
1098             do { \
1099               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
1100               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
1101               traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0);  \
1102               traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1);  \
1103               traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2);  \
1104               traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);   \
1105               traits.madd(A0, B_0, C0, B_0); \
1106               traits.madd(A1, B_0, C4, B_0); \
1107               traits.madd(A2, B_0, C8, B_0); \
1108               EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
1109             } while(false)
1110 
1111             EIGEN_GEBGP_ONESTEP(0);
1112             EIGEN_GEBGP_ONESTEP(1);
1113             EIGEN_GEBGP_ONESTEP(2);
1114             EIGEN_GEBGP_ONESTEP(3);
1115             EIGEN_GEBGP_ONESTEP(4);
1116             EIGEN_GEBGP_ONESTEP(5);
1117             EIGEN_GEBGP_ONESTEP(6);
1118             EIGEN_GEBGP_ONESTEP(7);
1119 
1120             blB += pk*RhsProgress;
1121             blA += pk*3*Traits::LhsProgress;
1122 
1123             EIGEN_ASM_COMMENT("end gebp micro kernel 3pX1");
1124           }
1125 
1126           // process remaining peeled loop
1127           for(Index k=peeled_kc; k<depth; k++)
1128           {
1129             RhsPacket B_0;
1130             EIGEN_GEBGP_ONESTEP(0);
1131             blB += RhsProgress;
1132             blA += 3*Traits::LhsProgress;
1133           }
1134 #undef EIGEN_GEBGP_ONESTEP
1135           ResPacket R0, R1, R2;
1136           ResPacket alphav = pset1<ResPacket>(alpha);
1137 
1138           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
1139           R1 = r0.loadPacket(1 * Traits::ResPacketSize);
1140           R2 = r0.loadPacket(2 * Traits::ResPacketSize);
1141           traits.acc(C0, alphav, R0);
1142           traits.acc(C4, alphav, R1);
1143           traits.acc(C8, alphav, R2);
1144           r0.storePacket(0 * Traits::ResPacketSize, R0);
1145           r0.storePacket(1 * Traits::ResPacketSize, R1);
1146           r0.storePacket(2 * Traits::ResPacketSize, R2);
1147           }
1148         }
1149       }
1150     }
1151 
1152     //---------- Process 2 * LhsProgress rows at once ----------
1153     if(mr>=2*Traits::LhsProgress)
1154     {
1155       const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function.
1156       // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size
1157       // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess),
1158       // or because we are testing specific blocking sizes.
1159       Index actual_panel_rows = (2*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 2*LhsProgress) ));
1160 
1161       for(Index i1=peeled_mc3; i1<peeled_mc2; i1+=actual_panel_rows)
1162       {
1163         Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc2);
1164         for(Index j2=0; j2<packet_cols4; j2+=nr)
1165         {
1166           for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
1167           {
1168 
1169           // We selected a 2*Traits::LhsProgress x nr micro block of res which is entirely
1170           // stored into 2 x nr registers.
1171 
1172           const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
1173           prefetch(&blA[0]);
1174 
1175           // gets res block as register
1176           AccPacket C0, C1, C2, C3,
1177                     C4, C5, C6, C7;
1178           traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3);
1179           traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6); traits.initAcc(C7);
1180 
1181           LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
1182           LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
1183           LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
1184           LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
1185 
1186           r0.prefetch(prefetch_res_offset);
1187           r1.prefetch(prefetch_res_offset);
1188           r2.prefetch(prefetch_res_offset);
1189           r3.prefetch(prefetch_res_offset);
1190 
1191           // performs "inner" products
1192           const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
1193           prefetch(&blB[0]);
1194           LhsPacket A0, A1;
1195 
1196           for(Index k=0; k<peeled_kc; k+=pk)
1197           {
1198             EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX4");
1199             RhsPacket B_0, B1, B2, B3, T0;
1200 
1201           // NOTE: the begin/end asm comments below work around bug 935!
1202           // but they are not enough for gcc>=6 without FMA (bug 1637)
1203           #if EIGEN_GNUC_AT_LEAST(6,0) && defined(EIGEN_VECTORIZE_SSE)
1204             #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND __asm__  ("" : [a0] "+x,m" (A0),[a1] "+x,m" (A1));
1205           #else
1206             #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND
1207           #endif
1208           #define EIGEN_GEBGP_ONESTEP(K) \
1209             do {                                                                \
1210               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4");        \
1211               traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0);                    \
1212               traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1);                    \
1213               traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3);  \
1214               traits.madd(A0, B_0, C0, T0);                                     \
1215               traits.madd(A1, B_0, C4, B_0);                                    \
1216               traits.madd(A0, B1,  C1, T0);                                     \
1217               traits.madd(A1, B1,  C5, B1);                                     \
1218               traits.madd(A0, B2,  C2, T0);                                     \
1219               traits.madd(A1, B2,  C6, B2);                                     \
1220               traits.madd(A0, B3,  C3, T0);                                     \
1221               traits.madd(A1, B3,  C7, B3);                                     \
1222               EIGEN_GEBP_2PX4_SPILLING_WORKAROUND                               \
1223               EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4");          \
1224             } while(false)
1225 
1226             internal::prefetch(blB+(48+0));
1227             EIGEN_GEBGP_ONESTEP(0);
1228             EIGEN_GEBGP_ONESTEP(1);
1229             EIGEN_GEBGP_ONESTEP(2);
1230             EIGEN_GEBGP_ONESTEP(3);
1231             internal::prefetch(blB+(48+16));
1232             EIGEN_GEBGP_ONESTEP(4);
1233             EIGEN_GEBGP_ONESTEP(5);
1234             EIGEN_GEBGP_ONESTEP(6);
1235             EIGEN_GEBGP_ONESTEP(7);
1236 
1237             blB += pk*4*RhsProgress;
1238             blA += pk*(2*Traits::LhsProgress);
1239 
1240             EIGEN_ASM_COMMENT("end gebp micro kernel 2pX4");
1241           }
1242           // process remaining peeled loop
1243           for(Index k=peeled_kc; k<depth; k++)
1244           {
1245             RhsPacket B_0, B1, B2, B3, T0;
1246             EIGEN_GEBGP_ONESTEP(0);
1247             blB += 4*RhsProgress;
1248             blA += 2*Traits::LhsProgress;
1249           }
1250 #undef EIGEN_GEBGP_ONESTEP
1251 
1252           ResPacket R0, R1, R2, R3;
1253           ResPacket alphav = pset1<ResPacket>(alpha);
1254 
1255           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
1256           R1 = r0.loadPacket(1 * Traits::ResPacketSize);
1257           R2 = r1.loadPacket(0 * Traits::ResPacketSize);
1258           R3 = r1.loadPacket(1 * Traits::ResPacketSize);
1259           traits.acc(C0, alphav, R0);
1260           traits.acc(C4, alphav, R1);
1261           traits.acc(C1, alphav, R2);
1262           traits.acc(C5, alphav, R3);
1263           r0.storePacket(0 * Traits::ResPacketSize, R0);
1264           r0.storePacket(1 * Traits::ResPacketSize, R1);
1265           r1.storePacket(0 * Traits::ResPacketSize, R2);
1266           r1.storePacket(1 * Traits::ResPacketSize, R3);
1267 
1268           R0 = r2.loadPacket(0 * Traits::ResPacketSize);
1269           R1 = r2.loadPacket(1 * Traits::ResPacketSize);
1270           R2 = r3.loadPacket(0 * Traits::ResPacketSize);
1271           R3 = r3.loadPacket(1 * Traits::ResPacketSize);
1272           traits.acc(C2,  alphav, R0);
1273           traits.acc(C6,  alphav, R1);
1274           traits.acc(C3,  alphav, R2);
1275           traits.acc(C7,  alphav, R3);
1276           r2.storePacket(0 * Traits::ResPacketSize, R0);
1277           r2.storePacket(1 * Traits::ResPacketSize, R1);
1278           r3.storePacket(0 * Traits::ResPacketSize, R2);
1279           r3.storePacket(1 * Traits::ResPacketSize, R3);
1280           }
1281         }
1282 
1283         // Deal with remaining columns of the rhs
1284         for(Index j2=packet_cols4; j2<cols; j2++)
1285         {
1286           for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
1287           {
1288           // One column at a time
1289           const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
1290           prefetch(&blA[0]);
1291 
1292           // gets res block as register
1293           AccPacket C0, C4;
1294           traits.initAcc(C0);
1295           traits.initAcc(C4);
1296 
1297           LinearMapper r0 = res.getLinearMapper(i, j2);
1298           r0.prefetch(prefetch_res_offset);
1299 
1300           // performs "inner" products
1301           const RhsScalar* blB = &blockB[j2*strideB+offsetB];
1302           LhsPacket A0, A1;
1303 
1304           for(Index k=0; k<peeled_kc; k+=pk)
1305           {
1306             EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX1");
1307             RhsPacket B_0, B1;
1308 
1309 #define EIGEN_GEBGP_ONESTEP(K) \
1310             do {                                                                  \
1311               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX1");          \
1312               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
1313               traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0);                      \
1314               traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1);                      \
1315               traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);                       \
1316               traits.madd(A0, B_0, C0, B1);                                       \
1317               traits.madd(A1, B_0, C4, B_0);                                      \
1318               EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX1");            \
1319             } while(false)
1320 
1321             EIGEN_GEBGP_ONESTEP(0);
1322             EIGEN_GEBGP_ONESTEP(1);
1323             EIGEN_GEBGP_ONESTEP(2);
1324             EIGEN_GEBGP_ONESTEP(3);
1325             EIGEN_GEBGP_ONESTEP(4);
1326             EIGEN_GEBGP_ONESTEP(5);
1327             EIGEN_GEBGP_ONESTEP(6);
1328             EIGEN_GEBGP_ONESTEP(7);
1329 
1330             blB += pk*RhsProgress;
1331             blA += pk*2*Traits::LhsProgress;
1332 
1333             EIGEN_ASM_COMMENT("end gebp micro kernel 2pX1");
1334           }
1335 
1336           // process remaining peeled loop
1337           for(Index k=peeled_kc; k<depth; k++)
1338           {
1339             RhsPacket B_0, B1;
1340             EIGEN_GEBGP_ONESTEP(0);
1341             blB += RhsProgress;
1342             blA += 2*Traits::LhsProgress;
1343           }
1344 #undef EIGEN_GEBGP_ONESTEP
1345           ResPacket R0, R1;
1346           ResPacket alphav = pset1<ResPacket>(alpha);
1347 
1348           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
1349           R1 = r0.loadPacket(1 * Traits::ResPacketSize);
1350           traits.acc(C0, alphav, R0);
1351           traits.acc(C4, alphav, R1);
1352           r0.storePacket(0 * Traits::ResPacketSize, R0);
1353           r0.storePacket(1 * Traits::ResPacketSize, R1);
1354           }
1355         }
1356       }
1357     }
1358     //---------- Process 1 * LhsProgress rows at once ----------
1359     if(mr>=1*Traits::LhsProgress)
1360     {
1361       // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth)
1362       for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)
1363       {
1364         // loops on each largest micro vertical panel of rhs (depth * nr)
1365         for(Index j2=0; j2<packet_cols4; j2+=nr)
1366         {
1367           // We select a 1*Traits::LhsProgress x nr micro block of res which is entirely
1368           // stored into 1 x nr registers.
1369 
1370           const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
1371           prefetch(&blA[0]);
1372 
1373           // gets res block as register
1374           AccPacket C0, C1, C2, C3;
1375           traits.initAcc(C0);
1376           traits.initAcc(C1);
1377           traits.initAcc(C2);
1378           traits.initAcc(C3);
1379 
1380           LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
1381           LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
1382           LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
1383           LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
1384 
1385           r0.prefetch(prefetch_res_offset);
1386           r1.prefetch(prefetch_res_offset);
1387           r2.prefetch(prefetch_res_offset);
1388           r3.prefetch(prefetch_res_offset);
1389 
1390           // performs "inner" products
1391           const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
1392           prefetch(&blB[0]);
1393           LhsPacket A0;
1394 
1395           for(Index k=0; k<peeled_kc; k+=pk)
1396           {
1397             EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX4");
1398             RhsPacket B_0, B1, B2, B3;
1399 
1400 #define EIGEN_GEBGP_ONESTEP(K) \
1401             do {                                                                \
1402               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX4");        \
1403               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
1404               traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0);                    \
1405               traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3);  \
1406               traits.madd(A0, B_0, C0, B_0);                                    \
1407               traits.madd(A0, B1,  C1, B1);                                     \
1408               traits.madd(A0, B2,  C2, B2);                                     \
1409               traits.madd(A0, B3,  C3, B3);                                     \
1410               EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX4");          \
1411             } while(false)
1412 
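            // Each EIGEN_GEBGP_ONESTEP below loads one lhs packet (A0) and broadcasts four rhs
            // coefficients (B_0..B3), accumulating A0*B_j into C0..C3; the interleaved prefetches
            // pull the upcoming packed rhs data into cache.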
1413             internal::prefetch(blB+(48+0));
1414             EIGEN_GEBGP_ONESTEP(0);
1415             EIGEN_GEBGP_ONESTEP(1);
1416             EIGEN_GEBGP_ONESTEP(2);
1417             EIGEN_GEBGP_ONESTEP(3);
1418             internal::prefetch(blB+(48+16));
1419             EIGEN_GEBGP_ONESTEP(4);
1420             EIGEN_GEBGP_ONESTEP(5);
1421             EIGEN_GEBGP_ONESTEP(6);
1422             EIGEN_GEBGP_ONESTEP(7);
1423 
1424             blB += pk*4*RhsProgress;
1425             blA += pk*1*LhsProgress;
1426 
1427             EIGEN_ASM_COMMENT("end gebp micro kernel 1pX4");
1428           }
1429           // process remaining peeled loop
1430           for(Index k=peeled_kc; k<depth; k++)
1431           {
1432             RhsPacket B_0, B1, B2, B3;
1433             EIGEN_GEBGP_ONESTEP(0);
1434             blB += 4*RhsProgress;
1435             blA += 1*LhsProgress;
1436           }
1437 #undef EIGEN_GEBGP_ONESTEP
1438 
1439           ResPacket R0, R1;
1440           ResPacket alphav = pset1<ResPacket>(alpha);
1441 
1442           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
1443           R1 = r1.loadPacket(0 * Traits::ResPacketSize);
1444           traits.acc(C0, alphav, R0);
1445           traits.acc(C1,  alphav, R1);
1446           r0.storePacket(0 * Traits::ResPacketSize, R0);
1447           r1.storePacket(0 * Traits::ResPacketSize, R1);
1448 
1449           R0 = r2.loadPacket(0 * Traits::ResPacketSize);
1450           R1 = r3.loadPacket(0 * Traits::ResPacketSize);
1451           traits.acc(C2,  alphav, R0);
1452           traits.acc(C3,  alphav, R1);
1453           r2.storePacket(0 * Traits::ResPacketSize, R0);
1454           r3.storePacket(0 * Traits::ResPacketSize, R1);
1455         }
1456 
1457         // Deal with remaining columns of the rhs
1458         for(Index j2=packet_cols4; j2<cols; j2++)
1459         {
1460           // One column at a time
1461           const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
1462           prefetch(&blA[0]);
1463 
1464           // gets res block as register
1465           AccPacket C0;
1466           traits.initAcc(C0);
1467 
1468           LinearMapper r0 = res.getLinearMapper(i, j2);
1469 
1470           // performs "inner" products
1471           const RhsScalar* blB = &blockB[j2*strideB+offsetB];
1472           LhsPacket A0;
1473 
1474           for(Index k=0; k<peeled_kc; k+=pk)
1475           {
1476             EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX1");
1477             RhsPacket B_0;
1478 
1479 #define EIGEN_GEBGP_ONESTEP(K) \
1480             do {                                                                \
1481               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX1");        \
1482               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
1483               traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0);                    \
1484               traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);                     \
1485               traits.madd(A0, B_0, C0, B_0);                                    \
1486               EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX1");          \
1487             } while(false)
1488 
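            // Each EIGEN_GEBGP_ONESTEP below performs a single packet-times-broadcast-coefficient
            // multiply-accumulate: C0 += A0 * B_0.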
1489             EIGEN_GEBGP_ONESTEP(0);
1490             EIGEN_GEBGP_ONESTEP(1);
1491             EIGEN_GEBGP_ONESTEP(2);
1492             EIGEN_GEBGP_ONESTEP(3);
1493             EIGEN_GEBGP_ONESTEP(4);
1494             EIGEN_GEBGP_ONESTEP(5);
1495             EIGEN_GEBGP_ONESTEP(6);
1496             EIGEN_GEBGP_ONESTEP(7);
1497 
1498             blB += pk*RhsProgress;
1499             blA += pk*1*Traits::LhsProgress;
1500 
1501             EIGEN_ASM_COMMENT("end gebp micro kernel 1pX1");
1502           }
1503 
1504           // process remaining peeled loop
1505           for(Index k=peeled_kc; k<depth; k++)
1506           {
1507             RhsPacket B_0;
1508             EIGEN_GEBGP_ONESTEP(0);
1509             blB += RhsProgress;
1510             blA += 1*Traits::LhsProgress;
1511           }
1512 #undef EIGEN_GEBGP_ONESTEP
1513           ResPacket R0;
1514           ResPacket alphav = pset1<ResPacket>(alpha);
1515           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
1516           traits.acc(C0, alphav, R0);
1517           r0.storePacket(0 * Traits::ResPacketSize, R0);
1518         }
1519       }
1520     }
1521     //---------- Process remaining rows, 1 at once ----------
1522     if(peeled_mc1<rows)
1523     {
1524       // loop on each panel of the rhs
1525       for(Index j2=0; j2<packet_cols4; j2+=nr)
1526       {
1527         // loop on each row of the lhs (1*LhsProgress x depth)
1528         for(Index i=peeled_mc1; i<rows; i+=1)
1529         {
1530           const LhsScalar* blA = &blockA[i*strideA+offsetA];
1531           prefetch(&blA[0]);
1532           const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
1533 
1534           // The following piece of code won't work for 512 bit registers.
1535           // Moreover, if LhsProgress==8 it assumes that a half packet of the same size
1536           // as nr (which is currently 4) exists for the result type.
1537           const int SResPacketHalfSize = unpacket_traits<typename unpacket_traits<SResPacket>::half>::size;
1538           if ((SwappedTraits::LhsProgress % 4) == 0 &&
1539               (SwappedTraits::LhsProgress <= 8) &&
1540               (SwappedTraits::LhsProgress!=8 || SResPacketHalfSize==nr))
1541           {
1542             SAccPacket C0, C1, C2, C3;
1543             straits.initAcc(C0);
1544             straits.initAcc(C1);
1545             straits.initAcc(C2);
1546             straits.initAcc(C3);
1547 
1548             const Index spk   = (std::max)(1,SwappedTraits::LhsProgress/4);
1549             const Index endk  = (depth/spk)*spk;
1550             const Index endk4 = (depth/(spk*4))*(spk*4);
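            // In this swapped 1 x nr product the packed rhs plays the role of the lhs: spk is the
            // number of depth (k) steps handled per madd, endk4 bounds the 4-way unrolled loop
            // below, and endk bounds the non-unrolled remainder loop.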
1551 
1552             Index k=0;
1553             for(; k<endk4; k+=4*spk)
1554             {
1555               SLhsPacket A0,A1;
1556               SRhsPacket B_0,B_1;
1557 
1558               straits.loadLhsUnaligned(blB+0*SwappedTraits::LhsProgress, A0);
1559               straits.loadLhsUnaligned(blB+1*SwappedTraits::LhsProgress, A1);
1560 
1561               straits.loadRhsQuad(blA+0*spk, B_0);
1562               straits.loadRhsQuad(blA+1*spk, B_1);
1563               straits.madd(A0,B_0,C0,B_0);
1564               straits.madd(A1,B_1,C1,B_1);
1565 
1566               straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);
1567               straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1);
1568               straits.loadRhsQuad(blA+2*spk, B_0);
1569               straits.loadRhsQuad(blA+3*spk, B_1);
1570               straits.madd(A0,B_0,C2,B_0);
1571               straits.madd(A1,B_1,C3,B_1);
1572 
1573               blB += 4*SwappedTraits::LhsProgress;
1574               blA += 4*spk;
1575             }
1576             C0 = padd(padd(C0,C1),padd(C2,C3));
1577             for(; k<endk; k+=spk)
1578             {
1579               SLhsPacket A0;
1580               SRhsPacket B_0;
1581 
1582               straits.loadLhsUnaligned(blB, A0);
1583               straits.loadRhsQuad(blA, B_0);
1584               straits.madd(A0,B_0,C0,B_0);
1585 
1586               blB += SwappedTraits::LhsProgress;
1587               blA += spk;
1588             }
1589             if(SwappedTraits::LhsProgress==8)
1590             {
1591               // Special case where we have to first reduce the accumulation register C0
1592               typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf;
1593               typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;
1594               typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SRhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
1595               typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;
1596 
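              // With LhsProgress==8, C0 holds twice as many partial sums as there are result
              // columns (nr==4), so it is first folded down to a half packet (its two halves are
              // summed) via predux_downto4 before alpha is applied and the result is scattered back.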
1597               SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2);
1598               SResPacketHalf alphav = pset1<SResPacketHalf>(alpha);
1599 
1600               if(depth-endk>0)
1601               {
1602                 // We have to handle the last row of the rhs which corresponds to a half-packet
1603                 SLhsPacketHalf a0;
1604                 SRhsPacketHalf b0;
1605                 straits.loadLhsUnaligned(blB, a0);
1606                 straits.loadRhs(blA, b0);
1607                 SAccPacketHalf c0 = predux_downto4(C0);
1608                 straits.madd(a0,b0,c0,b0);
1609                 straits.acc(c0, alphav, R);
1610               }
1611               else
1612               {
1613                 straits.acc(predux_downto4(C0), alphav, R);
1614               }
1615               res.scatterPacket(i, j2, R);
1616             }
1617             else
1618             {
1619               SResPacket R = res.template gatherPacket<SResPacket>(i, j2);
1620               SResPacket alphav = pset1<SResPacket>(alpha);
1621               straits.acc(C0, alphav, R);
1622               res.scatterPacket(i, j2, R);
1623             }
1624           }
1625           else // scalar path
1626           {
1627             // get a 1 x 4 res block as registers
1628             ResScalar C0(0), C1(0), C2(0), C3(0);
1629 
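            // CJMADD (a macro defined earlier in this file and #undef'ed below) accumulates the
            // product of its 2nd and 3rd arguments, with conjugation applied as requested by cj,
            // into its 4th argument; the last argument is only a scratch variable.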
1630             for(Index k=0; k<depth; k++)
1631             {
1632               LhsScalar A0;
1633               RhsScalar B_0, B_1;
1634 
1635               A0 = blA[k];
1636 
1637               B_0 = blB[0];
1638               B_1 = blB[1];
1639               CJMADD(cj,A0,B_0,C0,  B_0);
1640               CJMADD(cj,A0,B_1,C1,  B_1);
1641 
1642               B_0 = blB[2];
1643               B_1 = blB[3];
1644               CJMADD(cj,A0,B_0,C2,  B_0);
1645               CJMADD(cj,A0,B_1,C3,  B_1);
1646 
1647               blB += 4;
1648             }
1649             res(i, j2 + 0) += alpha * C0;
1650             res(i, j2 + 1) += alpha * C1;
1651             res(i, j2 + 2) += alpha * C2;
1652             res(i, j2 + 3) += alpha * C3;
1653           }
1654         }
1655       }
1656       // remaining columns
1657       for(Index j2=packet_cols4; j2<cols; j2++)
1658       {
1659         // loop on each row of the lhs (1*LhsProgress x depth)
1660         for(Index i=peeled_mc1; i<rows; i+=1)
1661         {
1662           const LhsScalar* blA = &blockA[i*strideA+offsetA];
1663           prefetch(&blA[0]);
1664           // gets a 1 x 1 res block as registers
1665           ResScalar C0(0);
1666           const RhsScalar* blB = &blockB[j2*strideB+offsetB];
1667           for(Index k=0; k<depth; k++)
1668           {
1669             LhsScalar A0 = blA[k];
1670             RhsScalar B_0 = blB[k];
1671             CJMADD(cj, A0, B_0, C0, B_0);
1672           }
1673           res(i, j2) += alpha * C0;
1674         }
1675       }
1676     }
1677   }
1678 
1679 
1680 #undef CJMADD
1681 
1682 // pack a block of the lhs
1683 // The traversal is as follows (mr==4):
1684 //   0  4  8 12 ...
1685 //   1  5  9 13 ...
1686 //   2  6 10 14 ...
1687 //   3  7 11 15 ...
1688 //
1689 //  16 20 24 28 ...
1690 //  17 21 25 29 ...
1691 //  18 22 26 30 ...
1692 //  19 23 27 31 ...
1693 //
1694 //  32 33 34 35 ...
1695 //  36 37 38 39 ...
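//
// A roughly equivalent scalar sketch of the packing performed below (illustrative only; the
// real code uses packet loads/stores and also honors the PanelMode stride/offset bookkeeping):
//
//   Index count = 0;
//   for(each row block [i, i+B))        // B = 3*PacketSize, then 2*PacketSize, 1*PacketSize, Pack2, 1
//     for(Index k=0; k<depth; k++)
//       for(Index r=0; r<B; r++)
//         blockA[count++] = cj(lhs(i+r, k));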
1696 template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
1697 struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
1698 {
1699   typedef typename DataMapper::LinearMapper LinearMapper;
1700   EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
1701 };
1702 
1703 template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
1704 EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
1705   ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
1706 {
1707   typedef typename packet_traits<Scalar>::type Packet;
1708   enum { PacketSize = packet_traits<Scalar>::size };
1709 
1710   EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
1711   EIGEN_UNUSED_VARIABLE(stride);
1712   EIGEN_UNUSED_VARIABLE(offset);
1713   eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
1714   eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
1715   conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
1716   Index count = 0;
1717 
1718   const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
1719   const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
1720   const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
1721   const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1
1722                          : Pack2>1             ? (rows/Pack2)*Pack2 : 0;
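  // peeled_mc3/mc2/mc1 are the row counts up to which columns can be packed with 3-, 2- and
  // 1-packet wide loads respectively; peeled_mc0 additionally covers rows that can still be
  // packed Pack2 scalars at a time. Anything beyond that is packed one row at a time.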
1723 
1724   Index i=0;
1725 
1726   // Pack 3 packets
1727   if(Pack1>=3*PacketSize)
1728   {
1729     for(; i<peeled_mc3; i+=3*PacketSize)
1730     {
1731       if(PanelMode) count += (3*PacketSize) * offset;
1732 
1733       for(Index k=0; k<depth; k++)
1734       {
1735         Packet A, B, C;
1736         A = lhs.loadPacket(i+0*PacketSize, k);
1737         B = lhs.loadPacket(i+1*PacketSize, k);
1738         C = lhs.loadPacket(i+2*PacketSize, k);
1739         pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
1740         pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
1741         pstore(blockA+count, cj.pconj(C)); count+=PacketSize;
1742       }
1743       if(PanelMode) count += (3*PacketSize) * (stride-offset-depth);
1744     }
1745   }
1746   // Pack 2 packets
1747   if(Pack1>=2*PacketSize)
1748   {
1749     for(; i<peeled_mc2; i+=2*PacketSize)
1750     {
1751       if(PanelMode) count += (2*PacketSize) * offset;
1752 
1753       for(Index k=0; k<depth; k++)
1754       {
1755         Packet A, B;
1756         A = lhs.loadPacket(i+0*PacketSize, k);
1757         B = lhs.loadPacket(i+1*PacketSize, k);
1758         pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
1759         pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
1760       }
1761       if(PanelMode) count += (2*PacketSize) * (stride-offset-depth);
1762     }
1763   }
1764   // Pack 1 packets
1765   if(Pack1>=1*PacketSize)
1766   {
1767     for(; i<peeled_mc1; i+=1*PacketSize)
1768     {
1769       if(PanelMode) count += (1*PacketSize) * offset;
1770 
1771       for(Index k=0; k<depth; k++)
1772       {
1773         Packet A;
1774         A = lhs.loadPacket(i+0*PacketSize, k);
1775         pstore(blockA+count, cj.pconj(A));
1776         count+=PacketSize;
1777       }
1778       if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);
1779     }
1780   }
1781   // Pack scalars
1782   if(Pack2<PacketSize && Pack2>1)
1783   {
1784     for(; i<peeled_mc0; i+=Pack2)
1785     {
1786       if(PanelMode) count += Pack2 * offset;
1787 
1788       for(Index k=0; k<depth; k++)
1789         for(Index w=0; w<Pack2; w++)
1790           blockA[count++] = cj(lhs(i+w, k));
1791 
1792       if(PanelMode) count += Pack2 * (stride-offset-depth);
1793     }
1794   }
1795   for(; i<rows; i++)
1796   {
1797     if(PanelMode) count += offset;
1798     for(Index k=0; k<depth; k++)
1799       blockA[count++] = cj(lhs(i, k));
1800     if(PanelMode) count += (stride-offset-depth);
1801   }
1802 }
1803 
1804 template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
1805 struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
1806 {
1807   typedef typename DataMapper::LinearMapper LinearMapper;
1808   EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
1809 };
1810 
1811 template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
1812 EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
1813   ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
1814 {
1815   typedef typename packet_traits<Scalar>::type Packet;
1816   enum { PacketSize = packet_traits<Scalar>::size };
1817 
1818   EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
1819   EIGEN_UNUSED_VARIABLE(stride);
1820   EIGEN_UNUSED_VARIABLE(offset);
1821   eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
1822   conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
1823   Index count = 0;
1824 
1825 //   const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
1826 //   const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
1827 //   const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
1828 
1829   int pack = Pack1;
1830   Index i = 0;
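  // Rows are packed in groups of 'pack' rows: pack starts at Pack1 and shrinks by PacketSize on
  // each pass of the while loop, with a final Pack2-wide pass for the tail. Whenever possible,
  // full PacketSize x PacketSize tiles are loaded along the contiguous k direction and transposed
  // with ptranspose so that, in blockA, the 'pack' row values of a given k end up contiguous.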
1831   while(pack>0)
1832   {
1833     Index remaining_rows = rows-i;
1834     Index peeled_mc = i+(remaining_rows/pack)*pack;
1835     for(; i<peeled_mc; i+=pack)
1836     {
1837       if(PanelMode) count += pack * offset;
1838 
1839       const Index peeled_k = (depth/PacketSize)*PacketSize;
1840       Index k=0;
1841       if(pack>=PacketSize)
1842       {
1843         for(; k<peeled_k; k+=PacketSize)
1844         {
1845           for (Index m = 0; m < pack; m += PacketSize)
1846           {
1847             PacketBlock<Packet> kernel;
1848             for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k);
1849             ptranspose(kernel);
1850             for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
1851           }
1852           count += PacketSize*pack;
1853         }
1854       }
1855       for(; k<depth; k++)
1856       {
1857         Index w=0;
1858         for(; w<pack-3; w+=4)
1859         {
1860           Scalar a(cj(lhs(i+w+0, k))),
1861                  b(cj(lhs(i+w+1, k))),
1862                  c(cj(lhs(i+w+2, k))),
1863                  d(cj(lhs(i+w+3, k)));
1864           blockA[count++] = a;
1865           blockA[count++] = b;
1866           blockA[count++] = c;
1867           blockA[count++] = d;
1868         }
1869         if(pack%4)
1870           for(;w<pack;++w)
1871             blockA[count++] = cj(lhs(i+w, k));
1872       }
1873 
1874       if(PanelMode) count += pack * (stride-offset-depth);
1875     }
1876 
1877     pack -= PacketSize;
1878     if(pack<Pack2 && (pack+PacketSize)!=Pack2)
1879       pack = Pack2;
1880   }
1881 
1882   for(; i<rows; i++)
1883   {
1884     if(PanelMode) count += offset;
1885     for(Index k=0; k<depth; k++)
1886       blockA[count++] = cj(lhs(i, k));
1887     if(PanelMode) count += (stride-offset-depth);
1888   }
1889 }
1890 
1891 // copy a complete panel of the rhs
1892 // this version is optimized for column major matrices
1893 // The traversal order is as follows (nr==4):
1894 //  0  1  2  3   12 13 14 15   24 27
1895 //  4  5  6  7   16 17 18 19   25 28
1896 //  8  9 10 11   20 21 22 23   26 29
1897 //  .  .  .  .    .  .  .  .    .  .
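//
// A roughly equivalent scalar sketch of the nr==4 packing performed below (illustrative only;
// the real code uses a vectorized 4 x PacketSize transpose and also honors PanelMode):
//
//   Index count = 0;
//   for(Index j2=0; j2<packet_cols4; j2+=4)
//     for(Index k=0; k<depth; k++)
//       for(Index c=0; c<4; c++)
//         blockB[count++] = cj(rhs(k, j2+c));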
1898 template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
1899 struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
1900 {
1901   typedef typename packet_traits<Scalar>::type Packet;
1902   typedef typename DataMapper::LinearMapper LinearMapper;
1903   enum { PacketSize = packet_traits<Scalar>::size };
1904   EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
1905 };
1906 
1907 template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
1908 EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
1909   ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
1910 {
1911   EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
1912   EIGEN_UNUSED_VARIABLE(stride);
1913   EIGEN_UNUSED_VARIABLE(offset);
1914   eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
1915   conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
1916   Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
1917   Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
1918   Index count = 0;
1919   const Index peeled_k = (depth/PacketSize)*PacketSize;
1920 //   if(nr>=8)
1921 //   {
1922 //     for(Index j2=0; j2<packet_cols8; j2+=8)
1923 //     {
1924 //       // skip what we have before
1925 //       if(PanelMode) count += 8 * offset;
1926 //       const Scalar* b0 = &rhs[(j2+0)*rhsStride];
1927 //       const Scalar* b1 = &rhs[(j2+1)*rhsStride];
1928 //       const Scalar* b2 = &rhs[(j2+2)*rhsStride];
1929 //       const Scalar* b3 = &rhs[(j2+3)*rhsStride];
1930 //       const Scalar* b4 = &rhs[(j2+4)*rhsStride];
1931 //       const Scalar* b5 = &rhs[(j2+5)*rhsStride];
1932 //       const Scalar* b6 = &rhs[(j2+6)*rhsStride];
1933 //       const Scalar* b7 = &rhs[(j2+7)*rhsStride];
1934 //       Index k=0;
1935 //       if(PacketSize==8) // TODO enable vectorized transposition for PacketSize==4
1936 //       {
1937 //         for(; k<peeled_k; k+=PacketSize) {
1938 //           PacketBlock<Packet> kernel;
1939 //           for (int p = 0; p < PacketSize; ++p) {
1940 //             kernel.packet[p] = ploadu<Packet>(&rhs[(j2+p)*rhsStride+k]);
1941 //           }
1942 //           ptranspose(kernel);
1943 //           for (int p = 0; p < PacketSize; ++p) {
1944 //             pstoreu(blockB+count, cj.pconj(kernel.packet[p]));
1945 //             count+=PacketSize;
1946 //           }
1947 //         }
1948 //       }
1949 //       for(; k<depth; k++)
1950 //       {
1951 //         blockB[count+0] = cj(b0[k]);
1952 //         blockB[count+1] = cj(b1[k]);
1953 //         blockB[count+2] = cj(b2[k]);
1954 //         blockB[count+3] = cj(b3[k]);
1955 //         blockB[count+4] = cj(b4[k]);
1956 //         blockB[count+5] = cj(b5[k]);
1957 //         blockB[count+6] = cj(b6[k]);
1958 //         blockB[count+7] = cj(b7[k]);
1959 //         count += 8;
1960 //       }
1961 //       // skip what we have after
1962 //       if(PanelMode) count += 8 * (stride-offset-depth);
1963 //     }
1964 //   }
1965 
1966   if(nr>=4)
1967   {
1968     for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
1969     {
1970       // skip what we have before
1971       if(PanelMode) count += 4 * offset;
1972       const LinearMapper dm0 = rhs.getLinearMapper(0, j2 + 0);
1973       const LinearMapper dm1 = rhs.getLinearMapper(0, j2 + 1);
1974       const LinearMapper dm2 = rhs.getLinearMapper(0, j2 + 2);
1975       const LinearMapper dm3 = rhs.getLinearMapper(0, j2 + 3);
1976 
1977       Index k=0;
1978       if((PacketSize%4)==0) // TODO enable vectorized transposition for PacketSize==2 ??
1979       {
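        // The "% PacketSize" on the packet indices below is only there to keep the indices in
        // range when this code is instantiated with PacketSize<4; at run time this branch is
        // never taken in that case, since the surrounding test requires PacketSize to be a
        // multiple of 4.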
1980         for(; k<peeled_k; k+=PacketSize) {
1981           PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel;
1982           kernel.packet[0] = dm0.loadPacket(k);
1983           kernel.packet[1%PacketSize] = dm1.loadPacket(k);
1984           kernel.packet[2%PacketSize] = dm2.loadPacket(k);
1985           kernel.packet[3%PacketSize] = dm3.loadPacket(k);
1986           ptranspose(kernel);
1987           pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0]));
1988           pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize]));
1989           pstoreu(blockB+count+2*PacketSize, cj.pconj(kernel.packet[2%PacketSize]));
1990           pstoreu(blockB+count+3*PacketSize, cj.pconj(kernel.packet[3%PacketSize]));
1991           count+=4*PacketSize;
1992         }
1993       }
1994       for(; k<depth; k++)
1995       {
1996         blockB[count+0] = cj(dm0(k));
1997         blockB[count+1] = cj(dm1(k));
1998         blockB[count+2] = cj(dm2(k));
1999         blockB[count+3] = cj(dm3(k));
2000         count += 4;
2001       }
2002       // skip what we have after
2003       if(PanelMode) count += 4 * (stride-offset-depth);
2004     }
2005   }
2006 
2007   // copy the remaining columns one at a time (nr==1)
2008   for(Index j2=packet_cols4; j2<cols; ++j2)
2009   {
2010     if(PanelMode) count += offset;
2011     const LinearMapper dm0 = rhs.getLinearMapper(0, j2);
2012     for(Index k=0; k<depth; k++)
2013     {
2014       blockB[count] = cj(dm0(k));
2015       count += 1;
2016     }
2017     if(PanelMode) count += (stride-offset-depth);
2018   }
2019 }
2020 
2021 // this version is optimized for row major matrices
2022 template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
2023 struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
2024 {
2025   typedef typename packet_traits<Scalar>::type Packet;
2026   typedef typename DataMapper::LinearMapper LinearMapper;
2027   enum { PacketSize = packet_traits<Scalar>::size };
2028   EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
2029 };
2030 
2031 template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
2032 EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
2033   ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
2034 {
2035   EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
2036   EIGEN_UNUSED_VARIABLE(stride);
2037   EIGEN_UNUSED_VARIABLE(offset);
2038   eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
2039   conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
2040   Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
2041   Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
2042   Index count = 0;
2043 
2044 //   if(nr>=8)
2045 //   {
2046 //     for(Index j2=0; j2<packet_cols8; j2+=8)
2047 //     {
2048 //       // skip what we have before
2049 //       if(PanelMode) count += 8 * offset;
2050 //       for(Index k=0; k<depth; k++)
2051 //       {
2052 //         if (PacketSize==8) {
2053 //           Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
2054 //           pstoreu(blockB+count, cj.pconj(A));
2055 //         } else if (PacketSize==4) {
2056 //           Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
2057 //           Packet B = ploadu<Packet>(&rhs[k*rhsStride + j2 + PacketSize]);
2058 //           pstoreu(blockB+count, cj.pconj(A));
2059 //           pstoreu(blockB+count+PacketSize, cj.pconj(B));
2060 //         } else {
2061 //           const Scalar* b0 = &rhs[k*rhsStride + j2];
2062 //           blockB[count+0] = cj(b0[0]);
2063 //           blockB[count+1] = cj(b0[1]);
2064 //           blockB[count+2] = cj(b0[2]);
2065 //           blockB[count+3] = cj(b0[3]);
2066 //           blockB[count+4] = cj(b0[4]);
2067 //           blockB[count+5] = cj(b0[5]);
2068 //           blockB[count+6] = cj(b0[6]);
2069 //           blockB[count+7] = cj(b0[7]);
2070 //         }
2071 //         count += 8;
2072 //       }
2073 //       // skip what we have after
2074 //       if(PanelMode) count += 8 * (stride-offset-depth);
2075 //     }
2076 //   }
2077   if(nr>=4)
2078   {
2079     for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
2080     {
2081       // skip what we have before
2082       if(PanelMode) count += 4 * offset;
2083       for(Index k=0; k<depth; k++)
2084       {
2085         if (PacketSize==4) {
2086           Packet A = rhs.loadPacket(k, j2);
2087           pstoreu(blockB+count, cj.pconj(A));
2088           count += PacketSize;
2089         } else {
2090           const LinearMapper dm0 = rhs.getLinearMapper(k, j2);
2091           blockB[count+0] = cj(dm0(0));
2092           blockB[count+1] = cj(dm0(1));
2093           blockB[count+2] = cj(dm0(2));
2094           blockB[count+3] = cj(dm0(3));
2095           count += 4;
2096         }
2097       }
2098       // skip what we have after
2099       if(PanelMode) count += 4 * (stride-offset-depth);
2100     }
2101   }
2102   // copy the remaining columns one at a time (nr==1)
2103   for(Index j2=packet_cols4; j2<cols; ++j2)
2104   {
2105     if(PanelMode) count += offset;
2106     for(Index k=0; k<depth; k++)
2107     {
2108       blockB[count] = cj(rhs(k, j2));
2109       count += 1;
2110     }
2111     if(PanelMode) count += stride-offset-depth;
2112   }
2113 }
2114 
2115 } // end namespace internal
2116 
2117 /** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
2118   * \sa setCpuCacheSizes */
2119 inline std::ptrdiff_t l1CacheSize()
2120 {
2121   std::ptrdiff_t l1, l2, l3;
2122   internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
2123   return l1;
2124 }
2125 
2126 /** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
2127   * \sa setCpuCacheSizes */
2128 inline std::ptrdiff_t l2CacheSize()
2129 {
2130   std::ptrdiff_t l1, l2, l3;
2131   internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
2132   return l2;
2133 }
2134 
2135 /** \returns the currently set level 3 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
2136   * \sa setCpuCacheSizes */
2138 inline std::ptrdiff_t l3CacheSize()
2139 {
2140   std::ptrdiff_t l1, l2, l3;
2141   internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
2142   return l3;
2143 }
2144 
2145 /** Set the cpu L1, L2, and L3 cache sizes (in bytes).
2146   * These values are used to adjust the block sizes
2147   * of the blocked algorithms (see the illustrative usage sketch below).
2148   *
2149   * \sa computeProductBlockingSizes */
2150 inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2, std::ptrdiff_t l3)
2151 {
2152   internal::manage_caching_sizes(SetAction, &l1, &l2, &l3);
2153 }
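
// Illustrative usage of the cache-size API above (a sketch only; the byte values below are
// placeholders, not recommendations):
//
//   std::ptrdiff_t l1 = Eigen::l1CacheSize();                 // query the current settings
//   std::ptrdiff_t l2 = Eigen::l2CacheSize();
//   std::ptrdiff_t l3 = Eigen::l3CacheSize();
//   Eigen::setCpuCacheSizes(32*1024, 512*1024, 4*1024*1024);  // override all three levels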
2154 
2155 } // end namespace Eigen
2156 
2157 #endif // EIGEN_GENERAL_BLOCK_PANEL_H
2158