Eigen  3.2.91
SparseLU.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


#ifndef EIGEN_SPARSE_LU_H
#define EIGEN_SPARSE_LU_H

namespace Eigen {

template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::StorageIndex> > class SparseLU;
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;

template <typename _MatrixType, typename _OrderingType>
class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >, public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::StorageIndex>
{
  protected:
    typedef SparseSolverBase<SparseLU<_MatrixType,_OrderingType> > APIBase;
    using APIBase::m_isInitialized;
  public:
    using APIBase::_solve_impl;

    typedef _MatrixType MatrixType;
    typedef _OrderingType OrderingType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> NCMatrix;
    typedef internal::MappedSuperNodalMatrix<Scalar, StorageIndex> SCMatrix;
    typedef Matrix<Scalar,Dynamic,1> ScalarVector;
    typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
    typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
    typedef internal::SparseLUImpl<Scalar, StorageIndex> Base;

  public:
    SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
    {
      initperfvalues();
    }
    explicit SparseLU(const MatrixType& matrix):m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
    {
      initperfvalues();
      compute(matrix);
    }

    ~SparseLU()
    {
      // Free all explicit dynamic pointers
    }

    void analyzePattern (const MatrixType& matrix);
    void factorize (const MatrixType& matrix);
    void simplicialfactorize(const MatrixType& matrix);

    void compute (const MatrixType& matrix)
    {
      // Analyze
      analyzePattern(matrix);
      // Factorize
      factorize(matrix);
    }
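
    // Illustrative usage sketch (assumes a filled, square SparseMatrix<double> A
    // and a VectorXd b of matching size):
    //
    //   SparseLU<SparseMatrix<double>, COLAMDOrdering<int> > lu;
    //   lu.compute(A);                 // analyzePattern(A) followed by factorize(A)
    //   if(lu.info() == Success)
    //     VectorXd x = lu.solve(b);    // solves A*x = b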

    inline Index rows() const { return m_mat.rows(); }
    inline Index cols() const { return m_mat.cols(); }

    void isSymmetric(bool sym)
    {
      m_symmetricmode = sym;
    }

    SparseLUMatrixLReturnType<SCMatrix> matrixL() const
    {
      return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
    }

    SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,StorageIndex> > matrixU() const
    {
      return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,StorageIndex> >(m_Lstore, m_Ustore);
    }

    inline const PermutationType& rowsPermutation() const
    {
      return m_perm_r;
    }

    inline const PermutationType& colsPermutation() const
    {
      return m_perm_c;
    }
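
    // Illustrative sketch of how the permutations combine with the L and U factors;
    // this mirrors _solve_impl() below (assumes a successful factorization in 'lu'
    // and a column-major VectorXd b):
    //
    //   VectorXd y = lu.rowsPermutation() * b;            // y = Pr * b
    //   lu.matrixL().solveInPlace(y);                     // forward substitution with L
    //   lu.matrixU().solveInPlace(y);                     // backward substitution with U
    //   VectorXd x = lu.colsPermutation().inverse() * y;  // undo the column permutation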

    void setPivotThreshold(const RealScalar& thresh)
    {
      m_diagpivotthresh = thresh;
    }
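
    // Illustrative note on the pivot threshold: with the default value of 1.0 the
    // factorization essentially performs partial pivoting (the diagonal entry is kept
    // only if its magnitude matches the largest entry of its column), while a value of
    // 0.0 keeps a nonzero diagonal entry unconditionally, favoring sparsity over
    // numerical stability. For example:
    //
    //   lu.setPivotThreshold(0.1);  // accept the diagonal entry if its magnitude is
    //                               // at least 10% of the column maximum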

#ifdef EIGEN_PARSED_BY_DOXYGEN
    /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. */
    template<typename Rhs>
    inline const Solve<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const;
#endif // EIGEN_PARSED_BY_DOXYGEN

    /** \brief Reports whether previous computation was successful. */
    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
      return m_info;
    }

    std::string lastErrorMessage() const
    {
      return m_lastError;
    }
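
    // Illustrative error-handling sketch (assumes a SparseLU object 'lu' on which
    // compute() or factorize() has already been called):
    //
    //   if(lu.info() != Success)
    //     std::cerr << "SparseLU failed: " << lu.lastErrorMessage() << std::endl;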

    template<typename Rhs, typename Dest>
    bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
    {
      Dest& X(X_base.derived());
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
      EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
                          THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);

      // Permute the right-hand side to form X = Pr*B;
      // on return, X is overwritten by the computed solution.
      X.resize(B.rows(),B.cols());

      // This ugly const_cast_derived() helps to detect aliasing when applying the permutations.
      for(Index j = 0; j < B.cols(); ++j)
        X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);

      // Forward substitution with L, then backward substitution with U
      this->matrixL().solveInPlace(X);
      this->matrixU().solveInPlace(X);

      // Permute back the solution
      for (Index j = 0; j < B.cols(); ++j)
        X.col(j) = colsPermutation().inverse() * X.col(j);

      return true;
    }
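
    // Illustrative multi right-hand-side sketch: _solve_impl() loops over the columns
    // of B, so several systems with the same matrix can be solved at once, provided the
    // destination is column-major (enforced by the static assertion above). Assuming a
    // factorized 'lu' and a MatrixXd B with A.rows() rows:
    //
    //   MatrixXd X = lu.solve(B);   // column j of X solves A * X.col(j) = B.col(j)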

    Scalar absDeterminant()
    {
      using std::abs;
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize with the determinant of the row matrix
      Scalar det = Scalar(1.);
      // Note that the diagonal blocks of U are stored in supernodes,
      // which are available in the L part :)
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.index() == j)
          {
            det *= abs(it.value());
            break;
          }
        }
      }
      return det;
    }

    Scalar logAbsDeterminant() const
    {
      using std::log;
      using std::abs;

      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      Scalar det = Scalar(0.);
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.row() < j) continue;
          if(it.row() == j)
          {
            det += log(abs(it.value()));
            break;
          }
        }
      }
      return det;
    }

    Scalar signDeterminant()
    {
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize with the determinant of the row matrix
      Index det = 1;
      // Note that the diagonal blocks of U are stored in supernodes,
      // which are available in the L part :)
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.index() == j)
          {
            if(it.value()<0)
              det = -det;
            else if(it.value()==0)
              return 0;
            break;
          }
        }
      }
      return det * m_detPermR * m_detPermC;
    }

    Scalar determinant()
    {
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize with the determinant of the row matrix
      Scalar det = Scalar(1.);
      // Note that the diagonal blocks of U are stored in supernodes,
      // which are available in the L part :)
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.index() == j)
          {
            det *= it.value();
            break;
          }
        }
      }
      return (m_detPermR * m_detPermC) > 0 ? det : -det;
    }
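
    // Illustrative sketch: determinant() is a product of the U diagonal and may
    // overflow or underflow for large matrices, so the sign/log pair is usually
    // preferable (assumes a real Scalar and a factorized 'lu'):
    //
    //   double logAbsDet = lu.logAbsDeterminant();
    //   double sign      = lu.signDeterminant();   // -1, 0 or +1
    //   // det(A) = sign * exp(logAbsDet) whenever it is representable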

  protected:
    // Functions
    void initperfvalues()
    {
      // Default performance-tuning parameters, in the spirit of SuperLU's environment inquiry:
      m_perfv.panel_size = 16;  // number of columns processed together as one panel
      m_perfv.relax      = 1;   // relaxation parameter bounding the size of relaxed supernodes
      m_perfv.maxsuper   = 128; // maximum number of columns in a supernode
      m_perfv.rowblk     = 16;  // row blocking size used by the numeric kernels
      m_perfv.colblk     = 8;   // column blocking size used by the numeric kernels
      m_perfv.fillfactor = 20;  // estimated fill ratio used to preallocate the factors
    }

    // Variables
    mutable ComputationInfo m_info;
    bool m_factorizationIsOk;
    bool m_analysisIsOk;
    std::string m_lastError;
    NCMatrix m_mat;               // The input (permuted) matrix
    SCMatrix m_Lstore;            // The lower triangular matrix (supernodal)
    MappedSparseMatrix<Scalar,ColMajor,StorageIndex> m_Ustore; // The upper triangular matrix
    PermutationType m_perm_c;     // Column permutation
    PermutationType m_perm_r;     // Row permutation
    IndexVector m_etree;          // Column elimination tree

    typename Base::GlobalLU_t m_glu;

    // SparseLU options
    bool m_symmetricmode;
    // values for performance
    internal::perfvalues m_perfv;
    RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
    Index m_nnzL, m_nnzU;         // Nonzeros in L and U factors
    Index m_detPermR, m_detPermC; // Determinants of the permutation matrices

  private:
    // Disable copy constructor
    SparseLU (const SparseLU& );

}; // End class SparseLU


// Functions needed by the analysis phase
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
{

  //TODO: it is possible, as in SuperLU, to compute row and column scaling vectors to equilibrate the matrix mat.

  // Firstly, copy the whole input matrix.
  m_mat = mat;

  // Compute fill-in ordering
  OrderingType ord;
  ord(m_mat,m_perm_c);

  // Apply the permutation to the columns of the input matrix
  if (m_perm_c.size())
  {
    m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME: This vector is filled but not subsequently used.
    // Then, permute only the column pointers
    ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast<StorageIndex*>(mat.outerIndexPtr()):0);

    // If the input matrix 'mat' is uncompressed, then the outer-indices do not match the ones of m_mat, and a copy is thus needed.
    if(!mat.isCompressed())
      IndexVector::Map(outerIndexPtr, mat.cols()+1) = IndexVector::Map(m_mat.outerIndexPtr(),mat.cols()+1);

    // Apply the permutation and compute the nnz per column.
    for (Index i = 0; i < mat.cols(); i++)
    {
      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
    }
  }

  // Compute the column elimination tree of the permuted matrix
  IndexVector firstRowElt;
  internal::coletree(m_mat, m_etree,firstRowElt);

  // In symmetric mode, do not do postorder here
  if (!m_symmetricmode) {
    IndexVector post, iwork;
    // Post order etree
    internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post);


    // Renumber etree in postorder
    Index m = m_mat.cols();
    iwork.resize(m+1);
    for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
    m_etree = iwork;

    // Postmultiply A*Pc by post, i.e. reorder the matrix according to the postorder of the etree
    PermutationType post_perm(m);
    for (Index i = 0; i < m; i++)
      post_perm.indices()(i) = post(i);

    // Combine the two permutations: postorder the permutation for future use
    if(m_perm_c.size()) {
      m_perm_c = post_perm * m_perm_c;
    }

  } // end postordering

  m_analysisIsOk = true;
}
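
// Illustrative sketch of reusing the symbolic analysis (assumes A1 and A2 are square
// SparseMatrix<double> objects sharing exactly the same sparsity pattern):
//
//   SparseLU<SparseMatrix<double> > lu;
//   lu.analyzePattern(A1);   // ordering + elimination tree, computed once
//   lu.factorize(A1);        // numerical factorization of A1
//   // ... later, only the numerical values of the matrix changed:
//   lu.factorize(A2);        // reuses the analysis performed on A1's pattern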

// Functions needed by the numerical factorization phase

template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
{
  using internal::emptyIdxLU;
  eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
  eigen_assert((matrix.rows() == matrix.cols()) && "Only for square matrices");

  typedef typename IndexVector::Scalar StorageIndex;

  m_isInitialized = true;


  // Apply the column permutation computed in analyzePattern()
  // m_mat = matrix * m_perm_c.inverse();
  m_mat = matrix;
  if (m_perm_c.size())
  {
    m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
    // Then, permute only the column pointers
    const StorageIndex * outerIndexPtr;
    if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
    else
    {
      StorageIndex* outerIndexPtr_t = new StorageIndex[matrix.cols()+1];
      for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
      outerIndexPtr = outerIndexPtr_t;
    }
    for (Index i = 0; i < matrix.cols(); i++)
    {
      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
    }
    if(!matrix.isCompressed()) delete[] outerIndexPtr;
  }
  else
  { //FIXME This should not be needed if the empty permutation is handled transparently
    m_perm_c.resize(matrix.cols());
    for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
  }

  Index m = m_mat.rows();
  Index n = m_mat.cols();
  Index nnz = m_mat.nonZeros();
  Index maxpanel = m_perfv.panel_size * m;
  // Allocate working storage common to the factor routines
  Index lwork = 0;
  Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
  if (info)
  {
    m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n";
    m_factorizationIsOk = false;
    return;
  }

  // Set up pointers for integer working arrays
  IndexVector segrep(m); segrep.setZero();
  IndexVector parent(m); parent.setZero();
  IndexVector xplore(m); xplore.setZero();
  IndexVector repfnz(maxpanel);
  IndexVector panel_lsub(maxpanel);
  IndexVector xprune(n); xprune.setZero();
  IndexVector marker(m*internal::LUNoMarker); marker.setZero();

  repfnz.setConstant(-1);
  panel_lsub.setConstant(-1);

  // Set up pointers for scalar working arrays
  ScalarVector dense;
  dense.setZero(maxpanel);
  ScalarVector tempv;
  tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );

  // Compute the inverse of perm_c
  PermutationType iperm_c(m_perm_c.inverse());

  // Identify initial relaxed snodes
  IndexVector relax_end(n);
  if ( m_symmetricmode == true )
    Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
  else
    Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);


  m_perm_r.resize(m);
  m_perm_r.indices().setConstant(-1);
  marker.setConstant(-1);
  m_detPermR = 1; // Record the determinant of the row permutation

  m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
  m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);

  // Work on one 'panel' at a time. A panel is one of the following:
  // (a) a relaxed supernode at the bottom of the etree, or
  // (b) panel_size contiguous columns, <panel_size> defined by the user
  Index jcol;
  IndexVector panel_histo(n);
  Index pivrow; // Pivotal row number in the original row matrix
  Index nseg1;  // Number of segments in U-column above panel row jcol
  Index nseg;   // Number of segments in each U-column
  Index irep;
  Index i, k, jj;
  for (jcol = 0; jcol < n; )
  {
    // Adjust panel size so that a panel won't overlap with the next relaxed snode.
    Index panel_size = m_perfv.panel_size; // upper bound on panel width
    for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
    {
      if (relax_end(k) != emptyIdxLU)
      {
        panel_size = k - jcol;
        break;
      }
    }
    if (k == n)
      panel_size = n - jcol;

    // Symbolic outer factorization on a panel of columns
    Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);

    // Numeric sup-panel updates in topological order
    Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);

    // Sparse LU within the panel, and below the panel diagonal
    for ( jj = jcol; jj < jcol + panel_size; jj++)
    {
      k = (jj - jcol) * m; // Column index for w-wide arrays

      nseg = nseg1; // begin after all the panel segments
      // Depth-first-search for the current column
      VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
      VectorBlock<IndexVector> repfnz_k(repfnz, k, m);
      info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
      if ( info )
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }
      // Numeric updates to this column
      VectorBlock<ScalarVector> dense_k(dense, k, m);
      VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1);
      info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
      if ( info )
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }

      // Copy the U-segments to ucol(*)
      info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k, m_perm_r.indices(), dense_k, m_glu);
      if ( info )
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }

      // Form the L-segment
      info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
      if ( info )
      {
        m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
        std::ostringstream returnInfo;
        returnInfo << info;
        m_lastError += returnInfo.str();
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }

      // Update the determinant of the row permutation matrix
      // FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot.
      if (pivrow != jj) m_detPermR = -m_detPermR;

      // Prune columns (0:jj-1) using column jj
      Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);

      // Reset repfnz for this column
      for (i = 0; i < nseg; i++)
      {
        irep = segrep(i);
        repfnz_k(irep) = emptyIdxLU;
      }
    } // end SparseLU within the panel
    jcol += panel_size; // Move to the next panel
  } // end for -- end elimination

  m_detPermR = m_perm_r.determinant();
  m_detPermC = m_perm_c.determinant();

  // Count the number of nonzeros in factors
  Base::countnz(n, m_nnzL, m_nnzU, m_glu);
  // Apply permutation to the L subscripts
  Base::fixupL(n, m_perm_r.indices(), m_glu);

  // Create supernode matrix L
  m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
  // Create the column-major upper sparse matrix U
  new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, StorageIndex> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );

  m_info = Success;
  m_factorizationIsOk = true;
}

template<typename MappedSupernodalType>
struct SparseLUMatrixLReturnType : internal::no_assignment_operator
{
  typedef typename MappedSupernodalType::Scalar Scalar;
  explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
  { }
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }
  template<typename Dest>
  void solveInPlace( MatrixBase<Dest> &X) const
  {
    m_mapL.solveInPlace(X);
  }
  const MappedSupernodalType& m_mapL;
};

template<typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType : internal::no_assignment_operator
{
  typedef typename MatrixLType::Scalar Scalar;
  explicit SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
    : m_mapL(mapL),m_mapU(mapU)
  { }
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }

  template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
  {
    Index nrhs = X.cols();
    Index n = X.rows();
    // Backward solve with U
    for (Index k = m_mapL.nsuper(); k >= 0; k--)
    {
      Index fsupc = m_mapL.supToCol()[k];
      Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
      Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
      Index luptr = m_mapL.colIndexPtr()[fsupc];

      if (nsupc == 1)
      {
        for (Index j = 0; j < nrhs; j++)
        {
          X(fsupc, j) /= m_mapL.valuePtr()[luptr];
        }
      }
      else
      {
        Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
        Map< Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
        U = A.template triangularView<Upper>().solve(U);
      }

      for (Index j = 0; j < nrhs; ++j)
      {
        for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
        {
          typename MatrixUType::InnerIterator it(m_mapU, jcol);
          for ( ; it; ++it)
          {
            Index irow = it.index();
            X(irow, j) -= X(jcol, j) * it.value();
          }
        }
      }
    } // End For U-solve
  }
  const MatrixLType& m_mapL;
  const MatrixUType& m_mapU;
};

} // End namespace Eigen

#endif