#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
#define EIGEN_GENERAL_BLOCK_PANEL_H
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
class gebp_traits;

/** \internal \returns b if a<=0, and returns a otherwise. */
inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
{
  return a<=0 ? b : a;
}
#if EIGEN_ARCH_i386_OR_x86_64
const std::ptrdiff_t defaultL1CacheSize = 32*1024;
const std::ptrdiff_t defaultL2CacheSize = 256*1024;
const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;
#else
const std::ptrdiff_t defaultL1CacheSize = 16*1024;
const std::ptrdiff_t defaultL2CacheSize = 512*1024;
const std::ptrdiff_t defaultL3CacheSize = 512*1024;
#endif
/** \internal */
inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)
{
  static bool m_cache_sizes_initialized = false;
  static std::ptrdiff_t m_l1CacheSize = 0;
  static std::ptrdiff_t m_l2CacheSize = 0;
  static std::ptrdiff_t m_l3CacheSize = 0;

  if(!m_cache_sizes_initialized)
  {
    int l1CacheSize, l2CacheSize, l3CacheSize;
    queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);
    // fall back to the compile-time defaults whenever the runtime query fails (returns <= 0)
    m_l1CacheSize = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize);
    m_l2CacheSize = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize);
    m_l3CacheSize = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize);
    m_cache_sizes_initialized = true;
  }

  if(action==SetAction)
  {
    // set the cpu cache sizes from the given values, in bytes
    eigen_internal_assert(l1!=0 && l2!=0);
    m_l1CacheSize = *l1;
    m_l2CacheSize = *l2;
    m_l3CacheSize = *l3;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(l1!=0 && l2!=0);
    *l1 = m_l1CacheSize;
    *l2 = m_l2CacheSize;
    *l3 = m_l3CacheSize;
  }
  else
  {
    eigen_internal_assert(false);
  }
}
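// Illustrative round-trip (a sketch; this mirrors what the public
// l1CacheSize()/l2CacheSize()/setCpuCacheSizes() helpers at the end of this
// file do):
//   std::ptrdiff_t l1, l2, l3;
//   internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);  // read the cached sizes
//   internal::manage_caching_sizes(SetAction, &l1, &l2, &l3);  // override them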
template<typename LhsScalar, typename RhsScalar, int KcFactor>
void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  // Query the cache sizes the blocking will be tuned for.
  std::ptrdiff_t l1, l2, l3;
  manage_caching_sizes(GetAction, &l1, &l2, &l3);

  if (num_threads > 1) {
    typedef typename Traits::ResScalar ResScalar;
    enum {
      kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
      k_mask = (0xffffffff/8)*8,

      mr = Traits::mr,
      mr_mask = (0xffffffff/mr)*mr,

      nr = Traits::nr,
      nr_mask = (0xffffffff/nr)*nr
    };
    // Increasing k gives us more time to prefetch the content of the "C"
    // registers. However once the latency is hidden there is no point in
    // increasing k further, so we cap it at 320 (value determined experimentally).
    const Index k_cache = (std::min<Index>)((l1-ksub)/kdiv, 320);
    if (k_cache < k) {
      k = k_cache & k_mask;
      eigen_internal_assert(k > 0);
    }
    const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
    const Index n_per_thread = numext::div_ceil(n, num_threads);
    if (n_cache <= n_per_thread) {
      // Don't exceed the capacity of the l2 cache.
      eigen_internal_assert(n_cache >= static_cast<Index>(nr));
      n = n_cache & nr_mask;
      eigen_internal_assert(n > 0);
    } else {
      n = (std::min<Index>)(n, (n_per_thread + nr - 1) & nr_mask);
    }

    if (l3 > l2) {
      // l3 is shared between all cores, so we'll give each thread its own chunk of l3.
      const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
      const Index m_per_thread = numext::div_ceil(m, num_threads);
      if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
        m = m_cache & mr_mask;
        eigen_internal_assert(m > 0);
      } else {
        m = (std::min<Index>)(m, (m_per_thread + mr - 1) & mr_mask);
      }
    }
  }
  else {
    // In unit tests we do not want to use extra large matrices,
    // so we reduce the cache size to check the blocking strategy is not flawed
#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    l1 = 9*1024;
    l2 = 32*1024;
    l3 = 512*1024;
#endif

    // Early return for small problems because the computations below are time consuming.
    // Note that for very tiny problems this function should be bypassed anyway,
    // because we use the coefficient-based implementation for them.
    if((std::max)(k,(std::max)(m,n))<48)
      return;

    typedef typename Traits::ResScalar ResScalar;
    enum {
      k_peeling = 8,
      k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
    };

    // ---- 1st level of blocking on L1 : the kc blocking ----
    // kc is chosen so that an mr x kc horizontal panel of the lhs plus a kc x nr
    // vertical panel of the rhs both fit within L1; kc must also be a multiple
    // of the peeling factor of 8, hence:
    const Index max_kc = ((l1-k_sub)/k_div) & (~(k_peeling-1));
    const Index old_k = k;
    if(k>max_kc)
    {
      // We are really blocking on the third dimension:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the result.
      k = (k%max_kc)==0 ? max_kc
                        : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));

      eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
    }
    // ---- 2nd level of blocking on max(L2,L3), i.e., the nc blocking ----

#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    const Index actual_l2 = l3;
#else
    const Index actual_l2 = 1572864; // == 1.5 MB
#endif

    // nc is chosen so that a kc x nc block of the rhs fits within half of L2;
    // the other half is implicitly reserved for the result and lhs coefficients.
    const Index lhs_bytes = m * k * sizeof(LhsScalar);
    const Index remaining_l1 = l1 - k_sub - lhs_bytes;
    Index max_nc;
    if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)
    {
      // L1 blocking
      max_nc = remaining_l1 / (k*sizeof(RhsScalar));
    }
    else
    {
      // L2 blocking
      max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
    }
    // WARNING Below, we assume that Traits::nr is a power of two.
    Index nc = std::min<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
    if(n>nc)
    {
      // We are really blocking over the columns:
      // -> reduce blocking size to keep the same number of sweeps over the packed lhs.
      n = (n%nc)==0 ? nc
                    : (nc - Traits::nr * ((nc-(n%nc))/(Traits::nr*(n/nc+1))));
    }
    else if(old_k==k)
    {
      // So far, no blocking at all, i.e., kc==k and nc==n.
      // In this case, let's perform a blocking over the rows such that the
      // packed lhs data is kept within the caches.
      Index problem_size = k*n*sizeof(LhsScalar);
      Index actual_lm = actual_l2;
      Index max_mc = m;
      if(problem_size<=1024)
      {
        // problem is small enough to keep in L1
        actual_lm = l1;
      }
      else if(l3!=0 && problem_size<=32768)
      {
        // we have both L2 and L3, and the problem is small enough to be kept in L2
        actual_lm = l2;
        max_mc = (std::min<Index>)(576,max_mc);
      }
      Index mc = (std::min<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
      if (mc > Traits::mr) mc -= mc % Traits::mr;
      else if (mc==0) return;
      m = (m%mc)==0 ? mc
                    : (mc - Traits::mr * ((mc-(m%mc))/(Traits::mr*(m/mc+1))));
    }
  }
}
inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
{
#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
  if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
    k = std::min<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
    m = std::min<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
    n = std::min<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
    return true;
  }
#else
  EIGEN_UNUSED_VARIABLE(k)
  EIGEN_UNUSED_VARIABLE(m)
  EIGEN_UNUSED_VARIABLE(n)
#endif
  return false;
}
/** \brief Computes the blocking parameters for an m x k times k x n matrix product,
  * selecting k, m and n so that the corresponding panels fit the CPU caches. */
template<typename LhsScalar, typename RhsScalar, int KcFactor>
void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  if (!useSpecificBlockingSizes(k, m, n)) {
    evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor>(k, m, n, num_threads);
  }

  // Make sure the blocking sizes are multiples of the register-block sizes.
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
  enum {
    kr = 8,
    mr = Traits::mr,
    nr = Traits::nr
  };
  if (k > kr) k -= k % kr;
  if (m > mr) m -= m % mr;
  if (n > nr) n -= n % nr;
}

template<typename LhsScalar, typename RhsScalar>
inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n, num_threads);
}
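// Usage sketch (illustrative): for a product of size (m x k) * (k x n),
//   Index kc = k, mc = m, nc = n;
//   computeProductBlockingSizes<float,float>(kc, mc, nc, /*num_threads=*/1);
// kc/mc/nc then hold the panel sizes consumed by gemm_pack_lhs, gemm_pack_rhs
// and gebp_kernel below.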
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
  #define CJMADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
#else

  // FIXME (a bit overkill maybe ?)
  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
    {
      c = cj.pmadd(a,b,c);
    }
  };

  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
    {
      t = b; t = cj.pmul(a,t); c = padd(c,t);
    }
  };

  template<typename CJ, typename A, typename B, typename C, typename T>
  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
  {
    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
  }

  #define CJMADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
#endif
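// In both branches, CJMADD(CJ,A,B,C,T) computes C += conj_if_requested(A) * B:
// either as a single fused pmadd when the target provides one, or through the
// pmul/padd pair above using T as a scratch register.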
/* Vectorization logic for the general case: the register blocking sizes
 * (mr x nr) and the packet types below are derived from the scalar types and
 * the number of available vector registers. */
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits
{
public:
  typedef _LhsScalar LhsScalar;
  typedef _RhsScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,

    // register block size along the N direction must be 1 or 4
    nr = 4,

    // register block size along the M direction
    default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // with a fused multiply-add we can afford a larger block of rows
    mr = Vectorizable ? 3*LhsPacketSize : default_mr,
#else
    mr = default_mr,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

  template<typename RhsPacketType>
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
  {
    dest = pset1<RhsPacketType>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = ploadquad<RhsPacket>(b);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = pload<LhsPacketType>(a);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = ploadu<LhsPacketType>(a);
  }
  template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
  EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
  {
    // When a fused multiply-add is available we use it directly; otherwise we
    // go through the scratch register tmp.
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c = pmadd(a,b,c);
#else
    tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = pmadd(c,alpha,r);
  }

  template<typename ResPacketHalf>
  EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const
  {
    r = pmadd(c,alpha,r);
  }
};
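// The traits above describe an mr x nr register block of accumulators.
// Illustrative instantiation (assuming 4-wide float packets, nr==4 and 16
// registers): default_mr = (16/2/4)*4 = 8, i.e. two lhs packets per row step;
// on a fused-multiply-add target, mr = 3*LhsPacketSize = 12 instead, which is
// what the 3pX4 micro kernel below is built around.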
template<typename RealScalar, bool _ConjLhs>
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
{
public:
  typedef std::complex<RealScalar> LhsScalar;
  typedef RealScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = false,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    mr = 3*LhsPacketSize,
#else
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }
  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a.v,b,c.v);
#else
    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(c,alpha,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};
template<typename Packet>
struct DoublePacket
{
  Packet first;
  Packet second;
};

template<typename Packet>
DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
{
  DoublePacket<Packet> res;
  res.first  = padd(a.first, b.first);
  res.second = padd(a.second,b.second);
  return res;
}

template<typename Packet>
const DoublePacket<Packet>& predux4(const DoublePacket<Packet> &a)
{
  // no reduction needed: the kernel recombines the two halves in acc()
  return a;
}

template<typename Packet>
struct unpacket_traits<DoublePacket<Packet> > {
  typedef DoublePacket<Packet> half;
};
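// A DoublePacket keeps the broadcast real parts of a complex rhs coefficient
// in .first and the broadcast imaginary parts in .second (see loadRhs below).
// Keeping the halves separate lets the complex*complex kernel accumulate
// a*real(b) and a*imag(b) independently and recombine them only once, in acc().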
template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
{
public:
  typedef std::complex<RealScalar> Scalar;
  typedef std::complex<RealScalar> LhsScalar;
  typedef std::complex<RealScalar> RhsScalar;
  typedef std::complex<RealScalar> ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    RealPacketSize = Vectorizable ? packet_traits<RealScalar>::size : 1,
    ResPacketSize  = Vectorizable ? packet_traits<ResScalar>::size : 1,
    LhsPacketSize  = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize  = Vectorizable ? packet_traits<RhsScalar>::size : 1,

    nr = 4,
    mr = ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<RealScalar>::type RealPacket;
  typedef typename packet_traits<Scalar>::type     ScalarPacket;
  typedef DoublePacket<RealPacket> DoublePacketType;

  typedef typename conditional<Vectorizable,RealPacket,      Scalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,ScalarPacket,    Scalar>::type ResPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;

  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }

  EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p)
  {
    p.first  = pset1<RealPacket>(RealScalar(0));
    p.second = pset1<RealPacket>(RealScalar(0));
  }
  // Scalar path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
  {
    dest = pset1<ResPacket>(*b);
  }

  // Vectorized path: broadcast the real and imaginary parts separately
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
  {
    dest.first  = pset1<RealPacket>(real(*b));
    dest.second = pset1<RealPacket>(imag(*b));
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
  {
    loadRhs(b,dest);
  }
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
  {
    eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
    loadRhs(b+2, b2);
    loadRhs(b+3, b3);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
  {
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  // Scalar path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
  {
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const
  {
    c.first  = padd(pmul(a,b.first), c.first);
    c.second = padd(pmul(a,b.second),c.second);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
  {
    c = cj.pmadd(a,b,c);
  }

  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }
  EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const
  {
    // assemble c
    ResPacket tmp;
    if((!ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(pconj(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((!ConjLhs)&&(ConjRhs))
    {
      tmp = pconj(pcplxflip(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = padd(pconj(ResPacket(c.first)),tmp);
    }
    else if((ConjLhs)&&(ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = psub(pconj(ResPacket(c.first)),tmp);
    }

    r = pmadd(tmp,alpha,r);
  }

protected:
  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
};
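// Derivation of the no-conjugation case in acc() above: with a = (ar,ai)
// interleaved and b broadcast as real(b) -> .first, imag(b) -> .second, the
// accumulators hold c.first = (ar*br, ai*br) and c.second = (ar*bi, ai*bi)
// per complex lane. pcplxflip(pconj(c.second)) = (-ai*bi, ar*bi), so adding
// c.first yields (ar*br - ai*bi, ai*br + ar*bi), i.e. the complex product a*b.
// The other three branches only move the sign flips around.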
template<typename RealScalar, bool _ConjRhs>
class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
{
public:
  typedef std::complex<RealScalar> Scalar;
  typedef RealScalar LhsScalar;
  typedef Scalar     RhsScalar;
  typedef Scalar     ResScalar;

  enum {
    ConjLhs = false,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    // FIXME: should depend on NumberOfRegisters
    nr = 4,
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

  // the lhs is real, so each coefficient is duplicated to fill a complex packet
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }
  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a,b.v,c.v);
#else
    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(alpha,c,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
};
// Helper that lets the gebp kernel either load the rhs the usual way, or load
// it once and rotate it in registers (see the specialization below).
template <typename GebpKernel, bool UseRotatingKernel = GebpKernel::UseRotatingKernel>
struct PossiblyRotatingKernelHelper
{
  // default implementation, not rotating

  typedef typename GebpKernel::Traits Traits;
  typedef typename Traits::RhsScalar RhsScalar;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::AccPacket AccPacket;

  const Traits& traits;
  PossiblyRotatingKernelHelper(const Traits& t) : traits(t) {}

  template <size_t K, size_t Index>
  void loadOrRotateRhs(RhsPacket& to, const RhsScalar* from) const
  {
    traits.loadRhs(from + (Index+4*K)*Traits::RhsProgress, to);
  }

  void unrotateResult(AccPacket&, AccPacket&, AccPacket&, AccPacket&)
  {
  }
};
// rotating implementation
template <typename GebpKernel>
struct PossiblyRotatingKernelHelper<GebpKernel, true>
{
  typedef typename GebpKernel::Traits Traits;
  typedef typename Traits::RhsScalar RhsScalar;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::AccPacket AccPacket;

  const Traits& traits;
  PossiblyRotatingKernelHelper(const Traits& t) : traits(t) {}

  template <size_t K, size_t Index>
  void loadOrRotateRhs(RhsPacket& to, const RhsScalar* from) const
  {
    if (Index == 0) {
      to = pload<RhsPacket>(from + 4*K*Traits::RhsProgress);
    } else {
      EIGEN_ASM_COMMENT("Do not reorder code, we're very tight on registers");
      to = protate<1>(to);
    }
  }
  void unrotateResult(AccPacket& res0,
                      AccPacket& res1,
                      AccPacket& res2,
                      AccPacket& res3)
  {
    PacketBlock<AccPacket> resblock;
    resblock.packet[0] = res0;
    resblock.packet[1] = res1;
    resblock.packet[2] = res2;
    resblock.packet[3] = res3;
    ptranspose(resblock);
    resblock.packet[3] = protate<1>(resblock.packet[3]);
    resblock.packet[2] = protate<2>(resblock.packet[2]);
    resblock.packet[1] = protate<3>(resblock.packet[1]);
    ptranspose(resblock);
    res0 = resblock.packet[0];
    res1 = resblock.packet[1];
    res2 = resblock.packet[2];
    res3 = resblock.packet[3];
  }
};
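// Rationale, as far as the code shows: the rotating variant replaces the four
// per-column rhs loads of the 3pX4 kernel by one aligned packet load plus
// cheap in-register rotations (protate<1>), which pays off on targets where
// broadcasts are expensive (see the UseRotatingKernel condition below). The
// accumulators end up column-rotated, and unrotateResult undoes that with the
// transpose / protate / transpose sequence above.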
// optimized general packed block * packed panel product kernel
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel
{
  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
  typedef typename Traits::ResScalar ResScalar;
  typedef typename Traits::LhsPacket LhsPacket;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::ResPacket ResPacket;
  typedef typename Traits::AccPacket AccPacket;

  typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
  typedef typename SwappedTraits::ResScalar SResScalar;
  typedef typename SwappedTraits::LhsPacket SLhsPacket;
  typedef typename SwappedTraits::RhsPacket SRhsPacket;
  typedef typename SwappedTraits::ResPacket SResPacket;
  typedef typename SwappedTraits::AccPacket SAccPacket;

  typedef typename DataMapper::LinearMapper LinearMapper;

  enum {
    Vectorizable  = Traits::Vectorizable,
    LhsProgress   = Traits::LhsProgress,
    RhsProgress   = Traits::RhsProgress,
    ResPacketSize = Traits::ResPacketSize
  };

  static const bool UseRotatingKernel =
    EIGEN_ARCH_ARM &&
    internal::is_same<LhsScalar, float>::value &&
    internal::is_same<RhsScalar, float>::value &&
    internal::is_same<ResScalar, float>::value &&
    Traits::LhsPacketSize == 4 &&
    Traits::RhsPacketSize == 4 &&
    Traits::ResPacketSize == 4;

  EIGEN_DONT_INLINE
  void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
                  Index rows, Index depth, Index cols, ResScalar alpha,
                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
};
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
EIGEN_DONT_INLINE
void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs>
  ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
               Index rows, Index depth, Index cols, ResScalar alpha,
               Index strideA, Index strideB, Index offsetA, Index offsetB)
{
  Traits traits;
  SwappedTraits straits;

  if(strideA==-1) strideA = depth;
  if(strideB==-1) strideB = depth;
  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;
  const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;
  const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0;
  enum { pk = 8 }; // peeling factor along the depth dimension
  const Index peeled_kc = depth & ~(pk-1);
  const Index prefetch_res_offset = 32/sizeof(ResScalar);
  //---------- Process 3 * LhsProgress rows at once ----------
  if(mr>=3*Traits::LhsProgress)
  {
    PossiblyRotatingKernelHelper<gebp_kernel> possiblyRotatingKernelHelper(traits);

    // The panel-row count is chosen so that a panel of the lhs plus the
    // register block of the result and the rhs panel fit in L1 at once.
    const Index l1 = defaultL1CacheSize; // in bytes; TODO: l1 should be passed to this function
    const Index actual_panel_rows = (3*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 3*LhsProgress) ));
    for(Index i1=0; i1<peeled_mc3; i1+=actual_panel_rows)
    {
      const Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc3);
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
        {
          // We select a 3*Traits::LhsProgress x nr micro block of res,
          // entirely stored into 3 x nr registers.
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as registers
          AccPacket C0, C1, C2,  C3,
                    C4, C5, C6,  C7,
                    C8, C9, C10, C11;
          traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2);  traits.initAcc(C3);
          traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6);  traits.initAcc(C7);
          traits.initAcc(C8); traits.initAcc(C9); traits.initAcc(C10); traits.initAcc(C11);

          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
          prefetch(&blB[0]);
          LhsPacket A0, A1;
          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX4");
            RhsPacket B_0, T0;
            LhsPacket A2;

#define EIGEN_GEBP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              internal::prefetch(blA+(3*K+16)*LhsProgress); \
              if (EIGEN_ARCH_ARM) internal::prefetch(blB+(4*K+16)*RhsProgress); \
              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
              possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 0>(B_0, blB); \
              traits.madd(A0, B_0, C0, T0); \
              traits.madd(A1, B_0, C4, T0); \
              traits.madd(A2, B_0, C8, B_0); \
              possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 1>(B_0, blB); \
              traits.madd(A0, B_0, C1, T0); \
              traits.madd(A1, B_0, C5, T0); \
              traits.madd(A2, B_0, C9, B_0); \
              possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 2>(B_0, blB); \
              traits.madd(A0, B_0, C2, T0); \
              traits.madd(A1, B_0, C6, T0); \
              traits.madd(A2, B_0, C10, B_0); \
              possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 3>(B_0, blB); \
              traits.madd(A0, B_0, C3, T0); \
              traits.madd(A1, B_0, C7, T0); \
              traits.madd(A2, B_0, C11, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
            } while(false)

            internal::prefetch(blB);
            EIGEN_GEBP_ONESTEP(0);
            EIGEN_GEBP_ONESTEP(1);
            EIGEN_GEBP_ONESTEP(2);
            EIGEN_GEBP_ONESTEP(3);
            EIGEN_GEBP_ONESTEP(4);
            EIGEN_GEBP_ONESTEP(5);
            EIGEN_GEBP_ONESTEP(6);
            EIGEN_GEBP_ONESTEP(7);

            blB += pk*4*RhsProgress;
            blA += pk*3*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 3pX4");
          }
          // process remaining of the peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, T0;
            LhsPacket A2;
            EIGEN_GEBP_ONESTEP(0);
            blB += 4*RhsProgress;
            blA += 3*Traits::LhsProgress;
          }
#undef EIGEN_GEBP_ONESTEP

          possiblyRotatingKernelHelper.unrotateResult(C0, C1, C2, C3);
          possiblyRotatingKernelHelper.unrotateResult(C4, C5, C6, C7);
          possiblyRotatingKernelHelper.unrotateResult(C8, C9, C10, C11);
          ResPacket R0, R1, R2;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r0.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C8, alphav, R2);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r0.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r1.loadPacket(0 * Traits::ResPacketSize);
          R1 = r1.loadPacket(1 * Traits::ResPacketSize);
          R2 = r1.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C1, alphav, R0);
          traits.acc(C5, alphav, R1);
          traits.acc(C9, alphav, R2);
          r1.storePacket(0 * Traits::ResPacketSize, R0);
          r1.storePacket(1 * Traits::ResPacketSize, R1);
          r1.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r2.loadPacket(0 * Traits::ResPacketSize);
          R1 = r2.loadPacket(1 * Traits::ResPacketSize);
          R2 = r2.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C2, alphav, R0);
          traits.acc(C6, alphav, R1);
          traits.acc(C10, alphav, R2);
          r2.storePacket(0 * Traits::ResPacketSize, R0);
          r2.storePacket(1 * Traits::ResPacketSize, R1);
          r2.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r3.loadPacket(0 * Traits::ResPacketSize);
          R1 = r3.loadPacket(1 * Traits::ResPacketSize);
          R2 = r3.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C3, alphav, R0);
          traits.acc(C7, alphav, R1);
          traits.acc(C11, alphav, R2);
          r3.storePacket(0 * Traits::ResPacketSize, R0);
          r3.storePacket(1 * Traits::ResPacketSize, R1);
          r3.storePacket(2 * Traits::ResPacketSize, R2);
        }
      }

      // Deal with the remaining columns of the rhs, one at a time
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
        {
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as registers
          AccPacket C0, C4, C8;
          traits.initAcc(C0);
          traits.initAcc(C4);
          traits.initAcc(C8);

          LinearMapper r0 = res.getLinearMapper(i, j2);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          LhsPacket A0, A1, A2;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX1");
            RhsPacket B_0;
#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
              traits.madd(A0, B_0, C0, B_0); \
              traits.madd(A1, B_0, C4, B_0); \
              traits.madd(A2, B_0, C8, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
            } while(false)
            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*RhsProgress;
            blA += pk*3*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 3pX1");
          }
          // process remaining of the peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0;
            EIGEN_GEBGP_ONESTEP(0);
            blB += RhsProgress;
            blA += 3*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP

          ResPacket R0, R1, R2;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r0.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C8, alphav, R2);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r0.storePacket(2 * Traits::ResPacketSize, R2);
        }
      }
    }
  }
  //---------- Process 2 * LhsProgress rows at once ----------
  if(mr>=2*Traits::LhsProgress)
  {
    const Index l1 = defaultL1CacheSize; // in bytes; TODO: l1 should be passed to this function
    Index actual_panel_rows = (2*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 2*LhsProgress) ));

    for(Index i1=peeled_mc3; i1<peeled_mc2; i1+=actual_panel_rows)
    {
      Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc2);
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
        {
          // We select a 2*Traits::LhsProgress x nr micro block of res,
          // entirely stored into 2 x nr registers.
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as registers
          AccPacket C0, C1, C2, C3,
                    C4, C5, C6, C7;
          traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3);
          traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6); traits.initAcc(C7);

          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

          r0.prefetch(prefetch_res_offset);
          r1.prefetch(prefetch_res_offset);
          r2.prefetch(prefetch_res_offset);
          r3.prefetch(prefetch_res_offset);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
          prefetch(&blB[0]);
          LhsPacket A0, A1;
          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX4");
            RhsPacket B_0, B1, B2, B3, T0;

#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
              traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
              traits.madd(A0, B_0, C0, T0); \
              traits.madd(A1, B_0, C4, B_0); \
              traits.madd(A0, B1, C1, T0); \
              traits.madd(A1, B1, C5, B1); \
              traits.madd(A0, B2, C2, T0); \
              traits.madd(A1, B2, C6, B2); \
              traits.madd(A0, B3, C3, T0); \
              traits.madd(A1, B3, C7, B3); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4"); \
            } while(false)
            internal::prefetch(blB+(48+0));
            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            internal::prefetch(blB+(48+16));
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*4*RhsProgress;
            blA += pk*(2*Traits::LhsProgress);

            EIGEN_ASM_COMMENT("end gebp micro kernel 2pX4");
          }
          // process remaining of the peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, B1, B2, B3, T0;
            EIGEN_GEBGP_ONESTEP(0);
            blB += 4*RhsProgress;
            blA += 2*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP
          ResPacket R0, R1, R2, R3;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r1.loadPacket(0 * Traits::ResPacketSize);
          R3 = r1.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C1, alphav, R2);
          traits.acc(C5, alphav, R3);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r1.storePacket(0 * Traits::ResPacketSize, R2);
          r1.storePacket(1 * Traits::ResPacketSize, R3);

          R0 = r2.loadPacket(0 * Traits::ResPacketSize);
          R1 = r2.loadPacket(1 * Traits::ResPacketSize);
          R2 = r3.loadPacket(0 * Traits::ResPacketSize);
          R3 = r3.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C2, alphav, R0);
          traits.acc(C6, alphav, R1);
          traits.acc(C3, alphav, R2);
          traits.acc(C7, alphav, R3);
          r2.storePacket(0 * Traits::ResPacketSize, R0);
          r2.storePacket(1 * Traits::ResPacketSize, R1);
          r3.storePacket(0 * Traits::ResPacketSize, R2);
          r3.storePacket(1 * Traits::ResPacketSize, R3);
        }
      }

      // Deal with the remaining columns of the rhs, one at a time
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
        {
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as registers
          AccPacket C0, C4;
          traits.initAcc(C0);
          traits.initAcc(C4);

          LinearMapper r0 = res.getLinearMapper(i, j2);
          r0.prefetch(prefetch_res_offset);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          LhsPacket A0, A1;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX1");
            RhsPacket B_0, B1;
#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX1"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
              traits.madd(A0, B_0, C0, B1); \
              traits.madd(A1, B_0, C4, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX1"); \
            } while(false)
            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*RhsProgress;
            blA += pk*2*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 2pX1");
          }
          // process remaining of the peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, B1;
            EIGEN_GEBGP_ONESTEP(0);
            blB += RhsProgress;
            blA += 2*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP

          ResPacket R0, R1;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
        }
      }
    }
  }
  //---------- Process 1 * LhsProgress rows at once ----------
  if(mr>=1*Traits::LhsProgress)
  {
    // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth)
    for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)
    {
      // loops on each largest micro vertical panel of rhs (depth * nr)
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
        prefetch(&blA[0]);

        // gets res block as registers
        AccPacket C0, C1, C2, C3;
        traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3);

        LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
        LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
        LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
        LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

        r0.prefetch(prefetch_res_offset);
        r1.prefetch(prefetch_res_offset);
        r2.prefetch(prefetch_res_offset);
        r3.prefetch(prefetch_res_offset);

        // performs "inner" products
        const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
        prefetch(&blB[0]);
        LhsPacket A0;
        for(Index k=0; k<peeled_kc; k+=pk)
        {
          EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX4");
          RhsPacket B_0, B1, B2, B3;

#define EIGEN_GEBGP_ONESTEP(K) \
          do { \
            EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX4"); \
            EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
            traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
            traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
            traits.madd(A0, B_0, C0, B_0); \
            traits.madd(A0, B1, C1, B1); \
            traits.madd(A0, B2, C2, B2); \
            traits.madd(A0, B3, C3, B3); \
            EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX4"); \
          } while(false)
          internal::prefetch(blB+(48+0));
          EIGEN_GEBGP_ONESTEP(0);
          EIGEN_GEBGP_ONESTEP(1);
          EIGEN_GEBGP_ONESTEP(2);
          EIGEN_GEBGP_ONESTEP(3);
          internal::prefetch(blB+(48+16));
          EIGEN_GEBGP_ONESTEP(4);
          EIGEN_GEBGP_ONESTEP(5);
          EIGEN_GEBGP_ONESTEP(6);
          EIGEN_GEBGP_ONESTEP(7);

          blB += pk*4*RhsProgress;
          blA += pk*1*LhsProgress;

          EIGEN_ASM_COMMENT("end gebp micro kernel 1pX4");
        }
        // process remaining of the peeled loop
        for(Index k=peeled_kc; k<depth; k++)
        {
          RhsPacket B_0, B1, B2, B3;
          EIGEN_GEBGP_ONESTEP(0);
          blB += 4*RhsProgress;
          blA += 1*LhsProgress;
        }
#undef EIGEN_GEBGP_ONESTEP
        ResPacket R0, R1;
        ResPacket alphav = pset1<ResPacket>(alpha);

        R0 = r0.loadPacket(0 * Traits::ResPacketSize);
        R1 = r1.loadPacket(0 * Traits::ResPacketSize);
        traits.acc(C0, alphav, R0);
        traits.acc(C1, alphav, R1);
        r0.storePacket(0 * Traits::ResPacketSize, R0);
        r1.storePacket(0 * Traits::ResPacketSize, R1);

        R0 = r2.loadPacket(0 * Traits::ResPacketSize);
        R1 = r3.loadPacket(0 * Traits::ResPacketSize);
        traits.acc(C2, alphav, R0);
        traits.acc(C3, alphav, R1);
        r2.storePacket(0 * Traits::ResPacketSize, R0);
        r3.storePacket(0 * Traits::ResPacketSize, R1);
      }
      // Deal with the remaining columns of the rhs, one at a time
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
        prefetch(&blA[0]);

        // gets res block as registers
        AccPacket C0;
        traits.initAcc(C0);

        LinearMapper r0 = res.getLinearMapper(i, j2);

        // performs "inner" products
        const RhsScalar* blB = &blockB[j2*strideB+offsetB];
        LhsPacket A0;

        for(Index k=0; k<peeled_kc; k+=pk)
        {
          EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX1");
          RhsPacket B_0;

#define EIGEN_GEBGP_ONESTEP(K) \
          do { \
            EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX1"); \
            EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
            traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
            traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
            traits.madd(A0, B_0, C0, B_0); \
            EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX1"); \
          } while(false)
          EIGEN_GEBGP_ONESTEP(0);
          EIGEN_GEBGP_ONESTEP(1);
          EIGEN_GEBGP_ONESTEP(2);
          EIGEN_GEBGP_ONESTEP(3);
          EIGEN_GEBGP_ONESTEP(4);
          EIGEN_GEBGP_ONESTEP(5);
          EIGEN_GEBGP_ONESTEP(6);
          EIGEN_GEBGP_ONESTEP(7);

          blB += pk*RhsProgress;
          blA += pk*1*Traits::LhsProgress;

          EIGEN_ASM_COMMENT("end gebp micro kernel 1pX1");
        }
        // process remaining of the peeled loop
        for(Index k=peeled_kc; k<depth; k++)
        {
          RhsPacket B_0;
          EIGEN_GEBGP_ONESTEP(0);
          blB += RhsProgress;
          blA += 1*Traits::LhsProgress;
        }
#undef EIGEN_GEBGP_ONESTEP

        ResPacket R0;
        ResPacket alphav = pset1<ResPacket>(alpha);
        R0 = r0.loadPacket(0 * Traits::ResPacketSize);
        traits.acc(C0, alphav, R0);
        r0.storePacket(0 * Traits::ResPacketSize, R0);
      }
    }
  }
  //---------- Process remaining rows, 1 by 1 ----------
  for(Index j2=0; j2<packet_cols4; j2+=nr)
  {
    // loop on each row of the lhs
    for(Index i=peeled_mc1; i<rows; i+=1)
    {
      const LhsScalar* blA = &blockA[i*strideA+offsetA];
      prefetch(&blA[0]);
      const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];

      // The swapped-traits path below loads the rhs coefficients as packets;
      // it is only usable when the swapped LhsProgress is a multiple of 4.
      if( (SwappedTraits::LhsProgress % 4)==0 )
      {
        SAccPacket C0, C1, C2, C3;
        straits.initAcc(C0);
        straits.initAcc(C1);
        straits.initAcc(C2);
        straits.initAcc(C3);

        const Index spk   = (std::max)(1,SwappedTraits::LhsProgress/4);
        const Index endk  = (depth/spk)*spk;
        const Index endk4 = (depth/(spk*4))*(spk*4);

        Index k=0;
        for(; k<endk4; k+=4*spk)
        {
          SLhsPacket A0, A1;
          SRhsPacket B_0, B_1;

          straits.loadLhsUnaligned(blB+0*SwappedTraits::LhsProgress, A0);
          straits.loadLhsUnaligned(blB+1*SwappedTraits::LhsProgress, A1);

          straits.loadRhsQuad(blA+0*spk, B_0);
          straits.loadRhsQuad(blA+1*spk, B_1);
          straits.madd(A0,B_0,C0,B_0);
          straits.madd(A1,B_1,C1,B_1);

          straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);
          straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1);
          straits.loadRhsQuad(blA+2*spk, B_0);
          straits.loadRhsQuad(blA+3*spk, B_1);
          straits.madd(A0,B_0,C2,B_0);
          straits.madd(A1,B_1,C3,B_1);

          blB += 4*SwappedTraits::LhsProgress;
          blA += 4*spk;
        }
        C0 = padd(padd(C0,C1),padd(C2,C3));

        for(; k<endk; k+=spk)
        {
          SLhsPacket A0;
          SRhsPacket B_0;

          straits.loadLhsUnaligned(blB, A0);
          straits.loadRhsQuad(blA, B_0);
          straits.madd(A0,B_0,C0,B_0);

          blB += SwappedTraits::LhsProgress;
          blA += spk;
        }
        if(SwappedTraits::LhsProgress==8)
        {
          // Special case where we have to first reduce the accumulation register C0
          typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf;
          typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;
          typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SLhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
          typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;

          SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2);
          SResPacketHalf alphav = pset1<SResPacketHalf>(alpha);

          if(depth-endk>0)
          {
            // We have to handle the last row(s) of the rhs, which correspond to a half-packet
            SLhsPacketHalf a0;
            SRhsPacketHalf b0;
            straits.loadLhsUnaligned(blB, a0);
            straits.loadRhs(blA, b0);
            SAccPacketHalf c0 = predux4(C0);
            straits.madd(a0,b0,c0,b0);
            straits.acc(c0, alphav, R);
          }
          else
          {
            straits.acc(predux4(C0), alphav, R);
          }
          res.scatterPacket(i, j2, R);
        }
        else
        {
          SResPacket R = res.template gatherPacket<SResPacket>(i, j2);
          SResPacket alphav = pset1<SResPacket>(alpha);
          straits.acc(C0, alphav, R);
          res.scatterPacket(i, j2, R);
        }
      }
      else // scalar path
      {
        // get a 1 x 4 res block as registers
        ResScalar C0(0), C1(0), C2(0), C3(0);

        for(Index k=0; k<depth; k++)
        {
          LhsScalar A0 = blA[k];
          RhsScalar B_0, B_1;

          B_0 = blB[0]; B_1 = blB[1];
          CJMADD(cj,A0,B_0,C0, B_0);
          CJMADD(cj,A0,B_1,C1, B_1);

          B_0 = blB[2]; B_1 = blB[3];
          CJMADD(cj,A0,B_0,C2, B_0);
          CJMADD(cj,A0,B_1,C3, B_1);

          blB += 4;
        }
        res(i, j2 + 0) += alpha * C0;
        res(i, j2 + 1) += alpha * C1;
        res(i, j2 + 2) += alpha * C2;
        res(i, j2 + 3) += alpha * C3;
      }
    }
  }
  // remaining columns
  for(Index j2=packet_cols4; j2<cols; j2++)
  {
    // loop on each remaining row of the lhs
    for(Index i=peeled_mc1; i<rows; i+=1)
    {
      const LhsScalar* blA = &blockA[i*strideA+offsetA];
      prefetch(&blA[0]);
      // gets a 1 x 1 res block as registers
      ResScalar C0(0);
      const RhsScalar* blB = &blockB[j2*strideB+offsetB];
      for(Index k=0; k<depth; k++)
      {
        LhsScalar A0 = blA[k];
        RhsScalar B_0 = blB[k];
        CJMADD(cj, A0, B_0, C0, B_0);
      }
      res(i, j2) += alpha * C0;
    }
  }
}
// pack a block of the lhs into contiguous, kernel-friendly panels
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
{
  typedef typename DataMapper::LinearMapper LinearMapper;
  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
  typedef typename packet_traits<Scalar>::type Packet;
  enum { PacketSize = packet_traits<Scalar>::size };

  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index count = 0;

  const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
  const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
  const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
  const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1
                         : Pack2>1             ? (rows/Pack2)*Pack2 : 0;

  Index i=0;
  // Pack 3 packets
  if(Pack1>=3*PacketSize)
  {
    for(; i<peeled_mc3; i+=3*PacketSize)
    {
      if(PanelMode) count += (3*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A, B, C;
        A = lhs.loadPacket(i+0*PacketSize, k);
        B = lhs.loadPacket(i+1*PacketSize, k);
        C = lhs.loadPacket(i+2*PacketSize, k);
        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(C)); count+=PacketSize;
      }
      if(PanelMode) count += (3*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack 2 packets
  if(Pack1>=2*PacketSize)
  {
    for(; i<peeled_mc2; i+=2*PacketSize)
    {
      if(PanelMode) count += (2*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A, B;
        A = lhs.loadPacket(i+0*PacketSize, k);
        B = lhs.loadPacket(i+1*PacketSize, k);
        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
      }
      if(PanelMode) count += (2*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack 1 packet
  if(Pack1>=1*PacketSize)
  {
    for(; i<peeled_mc1; i+=1*PacketSize)
    {
      if(PanelMode) count += (1*PacketSize) * offset;
      for(Index k=0; k<depth; k++)
      {
        Packet A;
        A = lhs.loadPacket(i+0*PacketSize, k);
        pstore(blockA+count, cj.pconj(A));
        count+=PacketSize;
      }
      if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack scalars
  if(Pack2<PacketSize && Pack2>1)
  {
    for(; i<peeled_mc0; i+=Pack2)
    {
      if(PanelMode) count += Pack2 * offset;
      for(Index k=0; k<depth; k++)
        for(Index w=0; w<Pack2; w++)
          blockA[count++] = cj(lhs(i+w, k));
      if(PanelMode) count += Pack2 * (stride-offset-depth);
    }
  }
  for(; i<rows; i++)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
      blockA[count++] = cj(lhs(i, k));
    if(PanelMode) count += (stride-offset-depth);
  }
}
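// Resulting blockA layout (as implemented above): panels of 3*PacketSize rows,
// then 2*PacketSize, then PacketSize, then Pack2, then single rows; within a
// panel the coefficients are stored column by column so the kernel can stream
// them with aligned packet loads. In PanelMode each panel is additionally
// padded to `stride` columns and shifted by `offset`, matching the assertions
// at the top of this operator().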
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
{
  typedef typename DataMapper::LinearMapper LinearMapper;
  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
  typedef typename packet_traits<Scalar>::type Packet;
  enum { PacketSize = packet_traits<Scalar>::size };

  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index count = 0;
  // pack rows in panels of decreasing size, starting at Pack1
  int pack = Pack1;
  Index i = 0;
  while(pack>0)
  {
    Index remaining_rows = rows-i;
    Index peeled_mc = i+(remaining_rows/pack)*pack;
    for(; i<peeled_mc; i+=pack)
    {
      if(PanelMode) count += pack * offset;

      const Index peeled_k = (depth/PacketSize)*PacketSize;
      Index k=0;
      if(pack>=PacketSize)
      {
        for(; k<peeled_k; k+=PacketSize)
        {
          for (Index m = 0; m < pack; m += PacketSize)
          {
            // transpose a PacketSize x PacketSize tile so the packed data is column-major
            PacketBlock<Packet> kernel;
            for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k);
            ptranspose(kernel);
            for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
          }
          count += PacketSize*pack;
        }
      }
      // remaining k coefficients, 4 rows at a time then 1 by 1
      for(; k<depth; k++)
      {
        Index w=0;
        for(; w<pack-3; w+=4)
        {
          Scalar a(cj(lhs(i+w+0, k))),
                 b(cj(lhs(i+w+1, k))),
                 c(cj(lhs(i+w+2, k))),
                 d(cj(lhs(i+w+3, k)));
          blockA[count++] = a;
          blockA[count++] = b;
          blockA[count++] = c;
          blockA[count++] = d;
        }
        if(pack%4)
          for(;w<pack;++w)
            blockA[count++] = cj(lhs(i+w, k));
      }

      if(PanelMode) count += pack * (stride-offset-depth);
    }

    pack -= PacketSize;
    if(pack<Pack2 && (pack+PacketSize)!=Pack2)
      pack = Pack2;
  }

  for(; i<rows; i++)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
      blockA[count++] = cj(lhs(i, k));
    if(PanelMode) count += (stride-offset-depth);
  }
}
// copy a complete panel of the rhs; this version is optimized for column major matrices
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename DataMapper::LinearMapper LinearMapper;
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
{
  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  Index count = 0;
  const Index peeled_k = (depth/PacketSize)*PacketSize;
  // then deal with 4-column panels (nr>=4)
  for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
  {
    // skip what we have before
    if(PanelMode) count += 4 * offset;
    const LinearMapper dm0 = rhs.getLinearMapper(0, j2 + 0);
    const LinearMapper dm1 = rhs.getLinearMapper(0, j2 + 1);
    const LinearMapper dm2 = rhs.getLinearMapper(0, j2 + 2);
    const LinearMapper dm3 = rhs.getLinearMapper(0, j2 + 3);

    Index k=0;
    if((PacketSize%4)==0) // TODO: enable vectorized transposition for PacketSize==2 ?
    {
      for(; k<peeled_k; k+=PacketSize) {
        PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel;
        kernel.packet[0           ] = dm0.loadPacket(k);
        kernel.packet[1%PacketSize] = dm1.loadPacket(k);
        kernel.packet[2%PacketSize] = dm2.loadPacket(k);
        kernel.packet[3%PacketSize] = dm3.loadPacket(k);
        ptranspose(kernel);
        pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0]));
        pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize]));
        pstoreu(blockB+count+2*PacketSize, cj.pconj(kernel.packet[2%PacketSize]));
        pstoreu(blockB+count+3*PacketSize, cj.pconj(kernel.packet[3%PacketSize]));
        count+=4*PacketSize;
      }
    }
    for(; k<depth; k++)
    {
      blockB[count+0] = cj(dm0(k));
      blockB[count+1] = cj(dm1(k));
      blockB[count+2] = cj(dm2(k));
      blockB[count+3] = cj(dm3(k));
      count += 4;
    }
    // skip what we have after
    if(PanelMode) count += 4 * (stride-offset-depth);
  }
  // copy the remaining columns one at a time (nr==1)
  for(Index j2=packet_cols4; j2<cols; ++j2)
  {
    if(PanelMode) count += offset;
    const LinearMapper dm0 = rhs.getLinearMapper(0, j2);
    for(Index k=0; k<depth; k++)
    {
      blockB[count] = cj(dm0(k));
      count += 1;
    }
    if(PanelMode) count += (stride-offset-depth);
  }
}
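// Resulting blockB layout (as implemented above): for each group of 4 columns,
// the depth coefficients are stored with the 4 column entries interleaved
// (b0 b1 b2 b3 for each k), which is exactly the order the nr==4 micro kernels
// consume via loadRhs/broadcastRhs; leftover columns are appended one by one.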
// this version is optimized for row major matrices
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename DataMapper::LinearMapper LinearMapper;
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
{
  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  Index count = 0;
  // then deal with 4-column panels (nr>=4)
  for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
  {
    // skip what we have before
    if(PanelMode) count += 4 * offset;
    for(Index k=0; k<depth; k++)
    {
      if (PacketSize==4) {
        Packet A = rhs.loadPacket(k, j2);
        pstoreu(blockB+count, cj.pconj(A));
        count += PacketSize;
      } else {
        const LinearMapper dm0 = rhs.getLinearMapper(k, j2);
        blockB[count+0] = cj(dm0(0));
        blockB[count+1] = cj(dm0(1));
        blockB[count+2] = cj(dm0(2));
        blockB[count+3] = cj(dm0(3));
        count += 4;
      }
    }
    // skip what we have after
    if(PanelMode) count += 4 * (stride-offset-depth);
  }
  // copy the remaining columns one at a time (nr==1)
  for(Index j2=packet_cols4; j2<cols; ++j2)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
    {
      blockB[count] = cj(rhs(k, j2));
      count += 1;
    }
    if(PanelMode) count += stride-offset-depth;
  }
}
/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters. */
inline std::ptrdiff_t l1CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l1;
}

/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters. */
inline std::ptrdiff_t l2CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l2;
}

/** Set the cpu L1, L2, and L3 cache sizes (in bytes); these values are used to
  * adjust the block sizes of the algorithms working per blocks. */
inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2, std::ptrdiff_t l3)
{
  internal::manage_caching_sizes(SetAction, &l1, &l2, &l3);
}
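// Usage sketch (illustrative): override the detected cache sizes before a
// large product so the blocking heuristic targets, e.g., a 64KB L1:
//   Eigen::setCpuCacheSizes(64*1024, 512*1024, 8*1024*1024);
//   std::ptrdiff_t l1 = Eigen::l1CacheSize(); // == 64*1024 from now on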
#endif // EIGEN_GENERAL_BLOCK_PANEL_H