#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_H
#define EIGEN_SELFADJOINT_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {
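// pack a selfadjoint block diagonal for use with the gebp kernel:
// only one triangle of the selfadjoint lhs is actually stored, so the packing
// routine below materializes the missing triangle on the fly by copying the
// stored coefficients transposed and conjugated, and by forcing the diagonal
// to be real.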
template<typename Scalar, typename Index, int Pack1, int Pack2_dummy, int StorageOrder>
struct symm_pack_lhs
{
  template<int BlockRows> inline
  void pack(Scalar* blockA, const const_blas_data_mapper<Scalar,Index,StorageOrder>& lhs, Index cols, Index i, Index& count)
  {
    // normal copy
    for(Index k=0; k<i; k++)
      for(Index w=0; w<BlockRows; w++)
        blockA[count++] = lhs(i+w,k);                // normal
    // symmetric copy of the diagonal block
    Index h = 0;
    for(Index k=i; k<i+BlockRows; k++)
    {
      for(Index w=0; w<h; w++)
        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed

      blockA[count++] = numext::real(lhs(k,k));      // real (diagonal)

      for(Index w=h+1; w<BlockRows; w++)
        blockA[count++] = lhs(i+w, k);               // normal
      ++h;
    }
    // transposed copy
    for(Index k=i+BlockRows; k<cols; k++)
      for(Index w=0; w<BlockRows; w++)
        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
  }
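  // packs the whole lhs, peeling off row panels of 3*PacketSize, 2*PacketSize,
  // 1*PacketSize and finally single rows, so that the packed layout matches
  // the register-blocking granularities expected by gebp_kernel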
  void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
  {
    enum { PacketSize = packet_traits<Scalar>::size };
    const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
    Index count = 0;
    const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
    const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
    const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
    if(Pack1>=3*PacketSize)
      for(Index i=0; i<peeled_mc3; i+=3*PacketSize)
        pack<3*PacketSize>(blockA, lhs, cols, i, count);

    if(Pack1>=2*PacketSize)
      for(Index i=peeled_mc3; i<peeled_mc2; i+=2*PacketSize)
        pack<2*PacketSize>(blockA, lhs, cols, i, count);

    if(Pack1>=1*PacketSize)
      for(Index i=peeled_mc2; i<peeled_mc1; i+=1*PacketSize)
        pack<1*PacketSize>(blockA, lhs, cols, i, count);
    // do the same with mr==1
    for(Index i=peeled_mc1; i<rows; i++)
    {
      for(Index k=0; k<i; k++)
        blockA[count++] = lhs(i, k);                 // normal

      blockA[count++] = numext::real(lhs(i, i));     // real (diagonal)

      for(Index k=i+1; k<cols; k++)
        blockA[count++] = numext::conj(lhs(k, i));   // transposed
    }
  }
};
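// pack a panel of a selfadjoint rhs: as for the lhs, coefficients from the
// non-stored triangle are obtained by conjugate transposition of the stored
// ones, while the diagonal is forced to be real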
template<typename Scalar, typename Index, int nr, int StorageOrder>
struct symm_pack_rhs
{
  enum { PacketSize = packet_traits<Scalar>::size };
  void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
  {
    Index end_k = k2 + rows;
    Index count = 0;
    const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);
    Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
    Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
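    // the columns [0,cols) split into three regions relative to the diagonal
    // block [k2,k2+rows): columns left of it are copied as-is, columns crossing
    // it mix normal, real-diagonal and conjugate-transposed coefficients, and
    // columns right of it are fully conjugate-transposed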
    // first part: normal case
    for(Index j2=0; j2<k2; j2+=nr)
    {
      for(Index k=k2; k<end_k; k++)
      {
        blockB[count+0] = rhs(k,j2+0);
        blockB[count+1] = rhs(k,j2+1);
        if (nr>=4)
        {
          blockB[count+2] = rhs(k,j2+2);
          blockB[count+3] = rhs(k,j2+3);
        }
        if (nr>=8)
        {
          blockB[count+4] = rhs(k,j2+4);
          blockB[count+5] = rhs(k,j2+5);
          blockB[count+6] = rhs(k,j2+6);
          blockB[count+7] = rhs(k,j2+7);
        }
        count += nr;
      }
    }
    // second part: diagonal block
    Index end8 = nr>=8 ? (std::min)(k2+rows,packet_cols8) : k2;
    if(nr>=8)
    {
      for(Index j2=k2; j2<end8; j2+=8)
      {
        // again we can split vertically in three different parts (transpose, symmetric, normal)
        // transpose
        for(Index k=k2; k<j2; k++)
        {
          blockB[count+0] = numext::conj(rhs(j2+0,k));
          blockB[count+1] = numext::conj(rhs(j2+1,k));
          blockB[count+2] = numext::conj(rhs(j2+2,k));
          blockB[count+3] = numext::conj(rhs(j2+3,k));
          blockB[count+4] = numext::conj(rhs(j2+4,k));
          blockB[count+5] = numext::conj(rhs(j2+5,k));
          blockB[count+6] = numext::conj(rhs(j2+6,k));
          blockB[count+7] = numext::conj(rhs(j2+7,k));
          count += 8;
        }
        // symmetric
        Index h = 0;
        for(Index k=j2; k<j2+8; k++)
        {
          // normal
          for (Index w=0 ; w<h; ++w)
            blockB[count+w] = rhs(k,j2+w);

          blockB[count+h] = numext::real(rhs(k,k));

          // transpose
          for (Index w=h+1 ; w<8; ++w)
            blockB[count+w] = numext::conj(rhs(j2+w,k));
          count += 8;
          ++h;
        }
        // normal
        for(Index k=j2+8; k<end_k; k++)
        {
          blockB[count+0] = rhs(k,j2+0);
          blockB[count+1] = rhs(k,j2+1);
          blockB[count+2] = rhs(k,j2+2);
          blockB[count+3] = rhs(k,j2+3);
          blockB[count+4] = rhs(k,j2+4);
          blockB[count+5] = rhs(k,j2+5);
          blockB[count+6] = rhs(k,j2+6);
          blockB[count+7] = rhs(k,j2+7);
          count += 8;
        }
      }
    }
    if(nr>=4)
    {
      for(Index j2=end8; j2<(std::min)(k2+rows,packet_cols4); j2+=4)
      {
        // transpose
        for(Index k=k2; k<j2; k++)
        {
          blockB[count+0] = numext::conj(rhs(j2+0,k));
          blockB[count+1] = numext::conj(rhs(j2+1,k));
          blockB[count+2] = numext::conj(rhs(j2+2,k));
          blockB[count+3] = numext::conj(rhs(j2+3,k));
          count += 4;
        }
        // symmetric
        Index h = 0;
        for(Index k=j2; k<j2+4; k++)
        {
          // normal
          for (Index w=0 ; w<h; ++w)
            blockB[count+w] = rhs(k,j2+w);

          blockB[count+h] = numext::real(rhs(k,k));

          // transpose
          for (Index w=h+1 ; w<4; ++w)
            blockB[count+w] = numext::conj(rhs(j2+w,k));
          count += 4;
          ++h;
        }
        // normal
        for(Index k=j2+4; k<end_k; k++)
        {
          blockB[count+0] = rhs(k,j2+0);
          blockB[count+1] = rhs(k,j2+1);
          blockB[count+2] = rhs(k,j2+2);
          blockB[count+3] = rhs(k,j2+3);
          count += 4;
        }
      }
    }
    // third part: transposed
    if(nr>=8)
    {
      for(Index j2=k2+rows; j2<packet_cols8; j2+=8)
      {
        for(Index k=k2; k<end_k; k++)
        {
          blockB[count+0] = numext::conj(rhs(j2+0,k));
          blockB[count+1] = numext::conj(rhs(j2+1,k));
          blockB[count+2] = numext::conj(rhs(j2+2,k));
          blockB[count+3] = numext::conj(rhs(j2+3,k));
          blockB[count+4] = numext::conj(rhs(j2+4,k));
          blockB[count+5] = numext::conj(rhs(j2+5,k));
          blockB[count+6] = numext::conj(rhs(j2+6,k));
          blockB[count+7] = numext::conj(rhs(j2+7,k));
          count += 8;
        }
      }
    }
    if(nr>=4)
    {
      for(Index j2=(std::max)(packet_cols8,k2+rows); j2<packet_cols4; j2+=4)
      {
        for(Index k=k2; k<end_k; k++)
        {
          blockB[count+0] = numext::conj(rhs(j2+0,k));
          blockB[count+1] = numext::conj(rhs(j2+1,k));
          blockB[count+2] = numext::conj(rhs(j2+2,k));
          blockB[count+3] = numext::conj(rhs(j2+3,k));
          count += 4;
        }
      }
    }
    // copy the remaining columns one at a time (=> same as with nr==1)
    for(Index j2=packet_cols4; j2<cols; ++j2)
    {
      // transpose
      Index half = (std::min)(end_k,j2);
      for(Index k=k2; k<half; k++)
      {
        blockB[count] = numext::conj(rhs(j2,k));
        count += 1;
      }

      if(half==j2 && half<k2+rows)
      {
        blockB[count] = numext::real(rhs(j2,j2));
        count += 1;
      }
      else
        half--;

      // normal
      for(Index k=half+1; k<k2+rows; k++)
      {
        blockB[count] = rhs(k,j2);
        count += 1;
      }
    }
  }
};
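/* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of
 * the general matrix matrix product.
 */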
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
          int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
          int ResStorageOrder>
struct product_selfadjoint_matrix;
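// a row-major destination is handled by transposing the whole product:
// C = A*B  <=>  C^T = B^T * A^T, which swaps the roles (and the effective
// storage orders) of the two operands and yields a column-major problem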
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
          int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs>
struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor>
{
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols,
    const Scalar* lhs, Index lhsStride,
    const Scalar* rhs, Index rhsStride,
    Scalar* res,       Index resStride,
    const Scalar& alpha)
  {
    product_selfadjoint_matrix<Scalar, Index,
      EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
      RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),
      EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
      LhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsSelfAdjoint,ConjugateLhs),
      ColMajor>
      ::run(cols, rows, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha);
  }
};
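// Optimized version for: selfadjoint lhs (only one triangle stored) times a
// general dense rhs, column-major result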
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs>
struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>
{
  static EIGEN_DONT_INLINE void run(
    Index rows, Index cols,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* res,        Index resStride,
    const Scalar& alpha);
};
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs>
EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>::run(
    Index rows, Index cols,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* _res,       Index resStride,
    const Scalar& alpha)
  {
    Index size = rows;
    typedef gebp_traits<Scalar,Scalar> Traits;

    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<Scalar, Index, (LhsStorageOrder == RowMajor) ? ColMajor : RowMajor> LhsTransposeMapper;
    typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    LhsTransposeMapper lhs_transpose(_lhs,lhsStride);
    RhsMapper rhs(_rhs,rhsStride);
    ResMapper res(_res, resStride);
    Index kc = size;  // cache block size along the K direction
    Index mc = rows;  // cache block size along the M direction
    Index nc = cols;  // cache block size along the N direction
    computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc, 1);
    // kc must be smaller than mc
    kc = (std::min)(kc,mc);

    std::size_t sizeB = kc*cols;
    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
    ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
    Scalar* blockB = allocatedBlockB;
    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
    symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
    gemm_pack_lhs<Scalar, Index, LhsTransposeMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
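    // sweep the common dimension by blocks of size kc; for each block a single
    // packed copy of the rhs panel is reused by every lhs panel below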
    for(Index k2=0; k2<size; k2+=kc)
    {
      const Index actual_kc = (std::min)(k2+kc,size)-k2;

      // pack rhs's panel into a sequential chunk of memory
      pack_rhs(blockB, rhs.getSubMapper(k2,0), actual_kc, cols);
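      // the selected lhs's panel has to be split in three different parts:
      //  1 - the transposed panel above the diagonal block => transposed packed copy
      //  2 - the diagonal block => special "symmetric" packed copy
      //  3 - the panel below the diagonal block => generic packed copy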
      for(Index i2=0; i2<k2; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,k2)-i2;
        // transposed packed copy
        pack_lhs_transposed(blockA, lhs_transpose.getSubMapper(i2, k2), actual_kc, actual_mc);

        gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
      }
      // the block diagonal
      {
        const Index actual_mc = (std::min)(k2+kc,size)-k2;
        // symmetric packed copy
        pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);

        gebp_kernel(res.getSubMapper(k2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
      }
      for(Index i2=k2+kc; i2<size; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,size)-i2;
        gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder, false>()
          (blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);

        gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
      }
    }
  }
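// Optimized version for: general dense lhs times a selfadjoint rhs (only one
// triangle stored), column-major result; here only the rhs packing differs
// from a plain gemm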
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs>
struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>
{
  static EIGEN_DONT_INLINE void run(
    Index rows, Index cols,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* res,        Index resStride,
    const Scalar& alpha);
};
template <typename Scalar, typename Index,
          int LhsStorageOrder, bool ConjugateLhs,
          int RhsStorageOrder, bool ConjugateRhs>
EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>::run(
    Index rows, Index cols,
    const Scalar* _lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsStride,
    Scalar* _res,       Index resStride,
    const Scalar& alpha)
  {
    Index size = cols;
    typedef gebp_traits<Scalar,Scalar> Traits;

    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    ResMapper res(_res,resStride);
    Index kc = size;  // cache block size along the K direction
    Index mc = rows;  // cache block size along the M direction
    Index nc = cols;  // cache block size along the N direction
    computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc, 1);
    std::size_t sizeB = kc*cols;
    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
    ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
    Scalar* blockB = allocatedBlockB;
    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    symm_pack_rhs<Scalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
    for(Index k2=0; k2<size; k2+=kc)
    {
      const Index actual_kc = (std::min)(k2+kc,size)-k2;

      pack_rhs(blockB, _rhs, rhsStride, actual_kc, cols, k2);

      // => GEPP
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;
        pack_lhs(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);

        gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
      }
    }
  }
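/***************************************************************************
* Wrapper to product_selfadjoint_matrix
***************************************************************************/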
template<typename Lhs, int LhsMode, typename Rhs, int RhsMode>
struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,RhsMode,false>
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;

  enum {
    LhsIsUpper = (LhsMode&(Upper|Lower))==Upper,
    LhsIsSelfAdjoint = (LhsMode&SelfAdjoint)==SelfAdjoint,
    RhsIsUpper = (RhsMode&(Upper|Lower))==Upper,
    RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint
  };
  template<typename Dest>
  static void run(Dest &dst, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);
    internal::product_selfadjoint_matrix<Scalar, Index,
      EIGEN_LOGICAL_XOR(LhsIsUpper,internal::traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint,
      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)),
      EIGEN_LOGICAL_XOR(RhsIsUpper,internal::traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint,
      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)),
      internal::traits<Dest>::Flags&RowMajorBit ? RowMajor : ColMajor>
      ::run(
        lhs.rows(), rhs.cols(),                 // sizes
        &lhs.coeffRef(0,0), lhs.outerStride(),  // lhs info
        &rhs.coeffRef(0,0), rhs.outerStride(),  // rhs info
        &dst.coeffRef(0,0), dst.outerStride(),  // result info
        actualAlpha                             // alpha
      );
  }
};

} // end namespace internal

} // end namespace Eigen
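// Usage sketch (illustrative only, not part of this header): the kernels above
// are ultimately reached through SelfAdjointView products, e.g.
//   Eigen::MatrixXcd A(n,n), B(n,m), C(n,m);
//   C.noalias() = A.selfadjointView<Eigen::Lower>() * B;  // _SYMM/_HEMM path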
#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_H