Eigen  3.2.91
SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
41 namespace internal {
42 template<typename _Scalar, int _Options, typename _Index>
43 struct traits<SparseMatrix<_Scalar, _Options, _Index> >
44 {
45  typedef _Scalar Scalar;
46  typedef _Index StorageIndex;
47  typedef Sparse StorageKind;
48  typedef MatrixXpr XprKind;
49  enum {
50  RowsAtCompileTime = Dynamic,
51  ColsAtCompileTime = Dynamic,
52  MaxRowsAtCompileTime = Dynamic,
53  MaxColsAtCompileTime = Dynamic,
54  Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
55  SupportedAccessPatterns = InnerRandomAccessPattern
56  };
57 };
58 
59 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
60 struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
61 {
62  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
63  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
64  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
65 
66  typedef _Scalar Scalar;
67  typedef Dense StorageKind;
68  typedef _Index StorageIndex;
69  typedef MatrixXpr XprKind;
70 
71  enum {
72  RowsAtCompileTime = Dynamic,
73  ColsAtCompileTime = 1,
74  MaxRowsAtCompileTime = Dynamic,
75  MaxColsAtCompileTime = 1,
76  Flags = LvalueBit
77  };
78 };
79 
80 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
81 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
82  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
83 {
84  enum {
85  Flags = 0
86  };
87 };
88 
89 } // end namespace internal
90 
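// Illustrative usage sketch (not from the original source): the _Options template
// parameter below selects the storage order and _Index the (signed) storage index type:
//
//   Eigen::SparseMatrix<double> A;                          // column-major (the default)
//   Eigen::SparseMatrix<double, Eigen::RowMajor> B;         // row-major storage
//   Eigen::SparseMatrix<double, Eigen::ColMajor, long> C;   // custom signed index type
//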
91 template<typename _Scalar, int _Options, typename _Index>
92 class SparseMatrix
93  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _Index> >
94 {
95  public:
96  typedef SparseCompressedBase<SparseMatrix> Base;
97  using Base::isCompressed;
98  using Base::nonZeros;
99  _EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
100  using Base::operator+=;
101  using Base::operator-=;
102 
106  typedef typename Base::InnerIterator InnerIterator;
107  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
108 
109 
110  using Base::IsRowMajor;
111  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
112  enum {
113  Options = _Options
114  };
115 
116  typedef typename Base::IndexVector IndexVector;
117  typedef typename Base::ScalarVector ScalarVector;
118  protected:
120 
121  Index m_outerSize;
122  Index m_innerSize;
123  StorageIndex* m_outerIndex;
124  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
125  Storage m_data;
126 
127  public:
128 
130  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
132  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
133 
135  inline Index innerSize() const { return m_innerSize; }
137  inline Index outerSize() const { return m_outerSize; }
138 
142  inline const Scalar* valuePtr() const { return &m_data.value(0); }
146  inline Scalar* valuePtr() { return &m_data.value(0); }
147 
151  inline const StorageIndex* innerIndexPtr() const { return &m_data.index(0); }
155  inline StorageIndex* innerIndexPtr() { return &m_data.index(0); }
156 
160  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
164  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
165 
169  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
173  inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
174 
176  inline Storage& data() { return m_data; }
178  inline const Storage& data() const { return m_data; }
179 
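  // Illustrative sketch (hypothetical matrix A, assumed column-major and compressed):
  // the raw compressed-storage arrays exposed above can be traversed directly:
  //
  //   for (int j = 0; j < A.outerSize(); ++j)
  //     for (int p = A.outerIndexPtr()[j]; p < A.outerIndexPtr()[j+1]; ++p)
  //       std::cout << A.innerIndexPtr()[p] << " " << j << " " << A.valuePtr()[p] << "\n";
  //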
182  inline Scalar coeff(Index row, Index col) const
183  {
184  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
185 
186  const Index outer = IsRowMajor ? row : col;
187  const Index inner = IsRowMajor ? col : row;
188  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
189  return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
190  }
191 
200  inline Scalar& coeffRef(Index row, Index col)
201  {
202  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
203 
204  const Index outer = IsRowMajor ? row : col;
205  const Index inner = IsRowMajor ? col : row;
206 
207  Index start = m_outerIndex[outer];
208  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
209  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
210  if(end<=start)
211  return insert(row,col);
212  const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
213  if((p<end) && (m_data.index(p)==inner))
214  return m_data.value(p);
215  else
216  return insert(row,col);
217  }
218 
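  // Illustrative sketch (hypothetical A, i, j): coeff() is a pure lookup, whereas
  // coeffRef() creates a zero entry when (i,j) is not stored yet:
  //
  //   double v = A.coeff(i, j);     // returns 0 if (i,j) is not stored
  //   A.coeffRef(i, j) = v + 1.0;   // inserts the entry first if it is missing
  //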
234  Scalar& insert(Index row, Index col);
235 
236  public:
237 
245  inline void setZero()
246  {
247  m_data.clear();
248  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
249  if(m_innerNonZeros)
250  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
251  }
252 
256  inline void reserve(Index reserveSize)
257  {
258  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
259  m_data.reserve(reserveSize);
260  }
261 
262  #ifdef EIGEN_PARSED_BY_DOXYGEN
263 
275  template<class SizesType>
276  inline void reserve(const SizesType& reserveSizes);
277  #else
278  template<class SizesType>
279  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
280  #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
281  typename
282  #endif
283  SizesType::value_type())
284  {
285  EIGEN_UNUSED_VARIABLE(enableif);
286  reserveInnerVectors(reserveSizes);
287  }
288  #endif // EIGEN_PARSED_BY_DOXYGEN
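  // Illustrative sketch of the per-inner-vector reservation API above (hypothetical
  // sizes m, n and an assumed estimate of ~6 nonzeros per column):
  //
  //   Eigen::SparseMatrix<double> A(m, n);
  //   A.reserve(Eigen::VectorXi::Constant(n, 6));
  //   // ... A.insert(i, j) = v; for each new entry ...
  //   A.makeCompressed();
  //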
289  protected:
290  template<class SizesType>
291  inline void reserveInnerVectors(const SizesType& reserveSizes)
292  {
293  if(isCompressed())
294  {
295  Index totalReserveSize = 0;
296  // turn the matrix into non-compressed mode
297  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
298  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
299 
300  // temporarily use m_innerNonZeros to hold the new starting points.
301  StorageIndex* newOuterIndex = m_innerNonZeros;
302 
303  StorageIndex count = 0;
304  for(Index j=0; j<m_outerSize; ++j)
305  {
306  newOuterIndex[j] = count;
307  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
308  totalReserveSize += reserveSizes[j];
309  }
310  m_data.reserve(totalReserveSize);
311  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
312  for(Index j=m_outerSize-1; j>=0; --j)
313  {
314  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
315  for(Index i=innerNNZ-1; i>=0; --i)
316  {
317  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
318  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
319  }
320  previousOuterIndex = m_outerIndex[j];
321  m_outerIndex[j] = newOuterIndex[j];
322  m_innerNonZeros[j] = innerNNZ;
323  }
324  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
325 
326  m_data.resize(m_outerIndex[m_outerSize]);
327  }
328  else
329  {
330  StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
331  if (!newOuterIndex) internal::throw_std_bad_alloc();
332 
333  StorageIndex count = 0;
334  for(Index j=0; j<m_outerSize; ++j)
335  {
336  newOuterIndex[j] = count;
337  StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
338  StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
339  count += toReserve + m_innerNonZeros[j];
340  }
341  newOuterIndex[m_outerSize] = count;
342 
343  m_data.resize(count);
344  for(Index j=m_outerSize-1; j>=0; --j)
345  {
346  Index offset = newOuterIndex[j] - m_outerIndex[j];
347  if(offset>0)
348  {
349  StorageIndex innerNNZ = m_innerNonZeros[j];
350  for(Index i=innerNNZ-1; i>=0; --i)
351  {
352  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
353  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
354  }
355  }
356  }
357 
358  std::swap(m_outerIndex, newOuterIndex);
359  std::free(newOuterIndex);
360  }
361 
362  }
363  public:
364 
365  //--- low level purely coherent filling ---
366 
377  inline Scalar& insertBack(Index row, Index col)
378  {
379  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
380  }
381 
384  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
385  {
386  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
387  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
388  Index p = m_outerIndex[outer+1];
389  ++m_outerIndex[outer+1];
390  m_data.append(Scalar(0), inner);
391  return m_data.value(p);
392  }
393 
396  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
397  {
398  Index p = m_outerIndex[outer+1];
399  ++m_outerIndex[outer+1];
400  m_data.append(Scalar(0), inner);
401  return m_data.value(p);
402  }
403 
406  inline void startVec(Index outer)
407  {
408  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
409  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
410  m_outerIndex[outer+1] = m_outerIndex[outer];
411  }
412 
416  inline void finalize()
417  {
418  if(isCompressed())
419  {
420  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
421  Index i = m_outerSize;
422  // find the last filled column
423  while (i>=0 && m_outerIndex[i]==0)
424  --i;
425  ++i;
426  while (i<=m_outerSize)
427  {
428  m_outerIndex[i] = size;
429  ++i;
430  }
431  }
432  }
433 
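  // Illustrative sketch of the low-level coherent filling protocol above, for a
  // column-major matrix (hypothetical rows, cols, nnzEstimate, value); entries must
  // be appended in strictly increasing (outer, inner) order:
  //
  //   Eigen::SparseMatrix<double> A(rows, cols);
  //   A.reserve(nnzEstimate);
  //   for (int j = 0; j < A.outerSize(); ++j)
  //   {
  //     A.startVec(j);
  //     // for each structurally nonzero row i of column j, in increasing i:
  //     A.insertBack(i, j) = value;
  //   }
  //   A.finalize();
  //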
434  //---
435 
436  template<typename InputIterators>
437  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
438 
439  void sumupDuplicates();
440 
441  //---
442 
445  Scalar& insertByOuterInner(Index j, Index i)
446  {
447  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
448  }
449 
452  void makeCompressed()
453  {
454  if(isCompressed())
455  return;
456 
457  eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
458 
459  Index oldStart = m_outerIndex[1];
460  m_outerIndex[1] = m_innerNonZeros[0];
461  for(Index j=1; j<m_outerSize; ++j)
462  {
463  Index nextOldStart = m_outerIndex[j+1];
464  Index offset = oldStart - m_outerIndex[j];
465  if(offset>0)
466  {
467  for(Index k=0; k<m_innerNonZeros[j]; ++k)
468  {
469  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
470  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
471  }
472  }
473  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
474  oldStart = nextOldStart;
475  }
476  std::free(m_innerNonZeros);
477  m_innerNonZeros = 0;
478  m_data.resize(m_outerIndex[m_outerSize]);
479  m_data.squeeze();
480  }
481 
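  // Illustrative note: random insertions typically leave the matrix in uncompressed
  // mode; makeCompressed() packs it back into plain CCS/CRS form, which most sparse
  // algorithms expect (hypothetical A, i, j, v):
  //
  //   A.insert(i, j) = v;    // leaves A uncompressed
  //   A.makeCompressed();    // frees the inner-nonzero buffer and squeezes memory
  //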
483  void uncompress()
484  {
485  if(m_innerNonZeros != 0)
486  return;
487  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
488  for (Index i = 0; i < m_outerSize; i++)
489  {
490  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
491  }
492  }
493 
495  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
496  {
497  prune(default_prunning_func(reference,epsilon));
498  }
499 
507  template<typename KeepFunc>
508  void prune(const KeepFunc& keep = KeepFunc())
509  {
510  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
511  // TODO also implement a unit test
512  makeCompressed();
513 
514  StorageIndex k = 0;
515  for(Index j=0; j<m_outerSize; ++j)
516  {
517  Index previousStart = m_outerIndex[j];
518  m_outerIndex[j] = k;
519  Index end = m_outerIndex[j+1];
520  for(Index i=previousStart; i<end; ++i)
521  {
522  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
523  {
524  m_data.value(k) = m_data.value(i);
525  m_data.index(k) = m_data.index(i);
526  ++k;
527  }
528  }
529  }
530  m_outerIndex[m_outerSize] = k;
531  m_data.resize(k,0);
532  }
533 
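  // Illustrative sketch of the two prune() overloads (hypothetical A; the second form
  // uses a C++11 lambda as the keep-functor):
  //
  //   A.prune(1.0, 1e-12);   // drop entries negligible w.r.t. the reference value 1.0
  //   A.prune([](Eigen::Index, Eigen::Index, double v) { return std::abs(v) > 1e-9; });
  //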
537  void conservativeResize(Index rows, Index cols)
538  {
539  // No change
540  if (this->rows() == rows && this->cols() == cols) return;
541 
542  // If one dimension is null, then there is nothing to be preserved
543  if(rows==0 || cols==0) return resize(rows,cols);
544 
545  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
546  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
547  StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
548 
549  // Deals with inner non zeros
550  if (m_innerNonZeros)
551  {
552  // Resize m_innerNonZeros
553  StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
554  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
555  m_innerNonZeros = newInnerNonZeros;
556 
557  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
558  m_innerNonZeros[i] = 0;
559  }
560  else if (innerChange < 0)
561  {
562  // Inner size decreased: allocate a new m_innerNonZeros
563  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
564  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
565  for(Index i = 0; i < m_outerSize; i++)
566  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
567  }
568 
569  // Change the m_innerNonZeros in case of a decrease of inner size
570  if (m_innerNonZeros && innerChange < 0)
571  {
572  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
573  {
574  StorageIndex &n = m_innerNonZeros[i];
575  StorageIndex start = m_outerIndex[i];
576  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
577  }
578  }
579 
580  m_innerSize = newInnerSize;
581 
582  // Re-allocate outer index structure if necessary
583  if (outerChange == 0)
584  return;
585 
586  StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
587  if (!newOuterIndex) internal::throw_std_bad_alloc();
588  m_outerIndex = newOuterIndex;
589  if (outerChange > 0)
590  {
591  StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
592  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
593  m_outerIndex[i] = last;
594  }
595  m_outerSize += outerChange;
596  }
597 
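  // Illustrative sketch: conservativeResize() keeps the overlapping entries, whereas
  // resize() below discards everything (hypothetical A, newRows, newCols):
  //
  //   A.conservativeResize(A.rows() + 1, A.cols());   // grow by one row, keep the data
  //   A.resize(newRows, newCols);                      // change size and drop all entries
  //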
605  void resize(Index rows, Index cols)
606  {
607  const Index outerSize = IsRowMajor ? rows : cols;
608  m_innerSize = IsRowMajor ? cols : rows;
609  m_data.clear();
610  if (m_outerSize != outerSize || m_outerSize==0)
611  {
612  std::free(m_outerIndex);
613  m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
614  if (!m_outerIndex) internal::throw_std_bad_alloc();
615 
616  m_outerSize = outerSize;
617  }
618  if(m_innerNonZeros)
619  {
620  std::free(m_innerNonZeros);
621  m_innerNonZeros = 0;
622  }
623  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
624  }
625 
628  void resizeNonZeros(Index size)
629  {
630  // TODO remove this function
631  m_data.resize(size);
632  }
633 
635  const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
636 
641  DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
642 
644  inline SparseMatrix()
645  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
646  {
647  check_template_parameters();
648  resize(0, 0);
649  }
650 
652  inline SparseMatrix(Index rows, Index cols)
653  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
654  {
655  check_template_parameters();
656  resize(rows, cols);
657  }
658 
660  template<typename OtherDerived>
661  SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
662  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
663  {
664  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
665  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
666  check_template_parameters();
667  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
668  if (needToTranspose) *this = other.derived();
669  else internal::call_assignment_no_alias(*this, other.derived());
670  }
671 
673  template<typename OtherDerived, unsigned int UpLo>
674  SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
675  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
676  {
677  check_template_parameters();
678  Base::operator=(other);
679  }
680 
682  inline SparseMatrix(const SparseMatrix& other)
683  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
684  {
685  check_template_parameters();
686  *this = other.derived();
687  }
688 
690  template<typename OtherDerived>
691  SparseMatrix(const ReturnByValue<OtherDerived>& other)
692  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
693  {
694  check_template_parameters();
695  initAssignment(other);
696  other.evalTo(*this);
697  }
698 
700  template<typename OtherDerived>
701  explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
702  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
703  {
704  check_template_parameters();
705  *this = other.derived();
706  }
707 
710  inline void swap(SparseMatrix& other)
711  {
712  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
713  std::swap(m_outerIndex, other.m_outerIndex);
714  std::swap(m_innerSize, other.m_innerSize);
715  std::swap(m_outerSize, other.m_outerSize);
716  std::swap(m_innerNonZeros, other.m_innerNonZeros);
717  m_data.swap(other.m_data);
718  }
719 
721  inline void setIdentity()
722  {
723  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
724  this->m_data.resize(rows());
725  Eigen::Map<IndexVector>(&this->m_data.index(0), rows()).setLinSpaced(0, StorageIndex(rows()-1));
726  Eigen::Map<ScalarVector>(&this->m_data.value(0), rows()).setOnes();
727  Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
728  }
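  // Illustrative sketch (hypothetical size n):
  //
  //   Eigen::SparseMatrix<double> I(n, n);
  //   I.setIdentity();   // n stored ones on the diagonal
  //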
729  inline SparseMatrix& operator=(const SparseMatrix& other)
730  {
731  if (other.isRValue())
732  {
733  swap(other.const_cast_derived());
734  }
735  else if(this!=&other)
736  {
737  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
738  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
739  #endif
740  initAssignment(other);
741  if(other.isCompressed())
742  {
743  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
744  m_data = other.m_data;
745  }
746  else
747  {
748  Base::operator=(other);
749  }
750  }
751  return *this;
752  }
753 
754 #ifndef EIGEN_PARSED_BY_DOXYGEN
755  template<typename OtherDerived>
756  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
757  { return Base::operator=(other.derived()); }
758 #endif // EIGEN_PARSED_BY_DOXYGEN
759 
760  template<typename OtherDerived>
761  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
762 
763  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
764  {
765  EIGEN_DBG_SPARSE(
766  s << "Nonzero entries:\n";
767  if(m.isCompressed())
768  for (Index i=0; i<m.nonZeros(); ++i)
769  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
770  else
771  for (Index i=0; i<m.outerSize(); ++i)
772  {
773  Index p = m.m_outerIndex[i];
774  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
775  Index k=p;
776  for (; k<pe; ++k)
777  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
778  for (; k<m.m_outerIndex[i+1]; ++k)
779  s << "(_,_) ";
780  }
781  s << std::endl;
782  s << std::endl;
783  s << "Outer pointers:\n";
784  for (Index i=0; i<m.outerSize(); ++i)
785  s << m.m_outerIndex[i] << " ";
786  s << " $" << std::endl;
787  if(!m.isCompressed())
788  {
789  s << "Inner non zeros:\n";
790  for (Index i=0; i<m.outerSize(); ++i)
791  s << m.m_innerNonZeros[i] << " ";
792  s << " $" << std::endl;
793  }
794  s << std::endl;
795  );
796  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
797  return s;
798  }
799 
801  inline ~SparseMatrix()
802  {
803  std::free(m_outerIndex);
804  std::free(m_innerNonZeros);
805  }
806 
808  Scalar sum() const;
809 
810 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
811 # include EIGEN_SPARSEMATRIX_PLUGIN
812 # endif
813 
814 protected:
815 
816  template<typename Other>
817  void initAssignment(const Other& other)
818  {
819  resize(other.rows(), other.cols());
820  if(m_innerNonZeros)
821  {
822  std::free(m_innerNonZeros);
823  m_innerNonZeros = 0;
824  }
825  }
826 
829  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
830 
833  class SingletonVector
834  {
835  StorageIndex m_index;
836  StorageIndex m_value;
837  public:
838  typedef StorageIndex value_type;
839  SingletonVector(Index i, Index v)
840  : m_index(convert_index(i)), m_value(convert_index(v))
841  {}
842 
843  StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
844  };
845 
848  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
849 
850 public:
853  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
854  {
855  const Index outer = IsRowMajor ? row : col;
856  const Index inner = IsRowMajor ? col : row;
857 
858  eigen_assert(!isCompressed());
859  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
860 
861  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
862  m_data.index(p) = convert_index(inner);
863  return (m_data.value(p) = 0);
864  }
865 
866 private:
867  static void check_template_parameters()
868  {
869  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
870  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
871  }
872 
873  struct default_prunning_func {
874  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
875  inline bool operator() (const Index&, const Index&, const Scalar& value) const
876  {
877  return !internal::isMuchSmallerThan(value, reference, epsilon);
878  }
879  Scalar reference;
880  RealScalar epsilon;
881  };
882 };
883 
884 namespace internal {
885 
886 template<typename InputIterator, typename SparseMatrixType>
887 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
888 {
889  EIGEN_UNUSED_VARIABLE(Options);
890  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
891  typedef typename SparseMatrixType::Scalar Scalar;
892  typedef typename SparseMatrixType::StorageIndex StorageIndex;
893  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
894 
895  if(begin!=end)
896  {
897  // pass 1: count the nnz per inner-vector
898  typename SparseMatrixType::IndexVector wi(trMat.outerSize());
899  wi.setZero();
900  for(InputIterator it(begin); it!=end; ++it)
901  {
902  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
903  wi(IsRowMajor ? it->col() : it->row())++;
904  }
905 
906  // pass 2: insert all the elements into trMat
907  trMat.reserve(wi);
908  for(InputIterator it(begin); it!=end; ++it)
909  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
910 
911  // pass 3:
912  trMat.sumupDuplicates();
913  }
914 
915  // pass 4: transposed copy -> implicit sorting
916  mat = trMat;
917 }
918 
919 }
920 
921 
959 template<typename Scalar, int _Options, typename _Index>
960 template<typename InputIterators>
961 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
962 {
963  internal::set_from_triplets(begin, end, *this);
964 }
965 
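// Illustrative usage sketch for setFromTriplets() (hypothetical rows, cols, i, j, v;
// duplicated (i,j) pairs are summed):
//
//   std::vector<Eigen::Triplet<double> > triplets;
//   triplets.push_back(Eigen::Triplet<double>(i, j, v));   // one push per entry
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.setFromTriplets(triplets.begin(), triplets.end());
//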
967 template<typename Scalar, int _Options, typename _Index>
968 void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
969 {
970  eigen_assert(!isCompressed());
971  // TODO, in practice we should be able to use m_innerNonZeros for that task
972  IndexVector wi(innerSize());
973  wi.fill(-1);
974  StorageIndex count = 0;
975  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
976  for(Index j=0; j<outerSize(); ++j)
977  {
978  StorageIndex start = count;
979  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
980  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
981  {
982  Index i = m_data.index(k);
983  if(wi(i)>=start)
984  {
985  // we have already met this entry => accumulate it
986  m_data.value(wi(i)) += m_data.value(k);
987  }
988  else
989  {
990  m_data.value(count) = m_data.value(k);
991  m_data.index(count) = m_data.index(k);
992  wi(i) = count;
993  ++count;
994  }
995  }
996  m_outerIndex[j] = start;
997  }
998  m_outerIndex[m_outerSize] = count;
999 
1000  // turn the matrix into compressed form
1001  std::free(m_innerNonZeros);
1002  m_innerNonZeros = 0;
1003  m_data.resize(m_outerIndex[m_outerSize]);
1004 }
1005 
1006 template<typename Scalar, int _Options, typename _Index>
1007 template<typename OtherDerived>
1008 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
1009 {
1010  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1011  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1012 
1013  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1014  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1015  #endif
1016 
1017  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1018  if (needToTranspose)
1019  {
1020  // two passes algorithm:
1021  // 1 - compute the number of coeffs per dest inner vector
1022  // 2 - do the actual copy/eval
1023  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1024  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1025  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1026  typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1027  OtherCopy otherCopy(other.derived());
1028  OtherCopyEval otherCopyEval(otherCopy);
1029 
1030  SparseMatrix dest(other.rows(),other.cols());
1031  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1032 
1033  // pass 1
1034  // FIXME the above copy could be merged with that pass
1035  for (Index j=0; j<otherCopy.outerSize(); ++j)
1036  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1037  ++dest.m_outerIndex[it.index()];
1038 
1039  // prefix sum
1040  StorageIndex count = 0;
1041  IndexVector positions(dest.outerSize());
1042  for (Index j=0; j<dest.outerSize(); ++j)
1043  {
1044  Index tmp = dest.m_outerIndex[j];
1045  dest.m_outerIndex[j] = count;
1046  positions[j] = count;
1047  count += tmp;
1048  }
1049  dest.m_outerIndex[dest.outerSize()] = count;
1050  // alloc
1051  dest.m_data.resize(count);
1052  // pass 2
1053  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1054  {
1055  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1056  {
1057  Index pos = positions[it.index()]++;
1058  dest.m_data.index(pos) = j;
1059  dest.m_data.value(pos) = it.value();
1060  }
1061  }
1062  this->swap(dest);
1063  return *this;
1064  }
1065  else
1066  {
1067  if(other.isRValue())
1068  {
1069  initAssignment(other.derived());
1070  }
1071  // there is no special optimization
1072  return Base::operator=(other.derived());
1073  }
1074 }
1075 
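// The transposed-assignment branch above is a classic two-pass, counting-sort style
// construction. A minimal standalone sketch of the same idea on raw arrays (all names
// hypothetical):
//
//   for (Index k = 0; k < nnz; ++k) ++counts[destOuter[k]];                        // pass 1: histogram
//   for (Index j = 0, s = 0; j < outerSize; ++j) { start[j] = s; s += counts[j]; } // prefix sum
//   for (Index k = 0; k < nnz; ++k) {                                              // pass 2: scatter
//     Index p = start[destOuter[k]]++;
//     destInner[p] = srcInner[k]; destValue[p] = srcValue[k];
//   }
//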
1076 template<typename _Scalar, int _Options, typename _Index>
1077 typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insert(Index row, Index col)
1078 {
1079  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1080 
1081  const Index outer = IsRowMajor ? row : col;
1082  const Index inner = IsRowMajor ? col : row;
1083 
1084  if(isCompressed())
1085  {
1086  if(nonZeros()==0)
1087  {
1088  // reserve space if not already done
1089  if(m_data.allocatedSize()==0)
1090  m_data.reserve(2*m_innerSize);
1091 
1092  // turn the matrix into non-compressed mode
1093  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1094  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1095 
1096  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1097 
1098  // pack all inner-vectors to the end of the pre-allocated space
1099  // and allocate the entire free-space to the first inner-vector
1100  StorageIndex end = convert_index(m_data.allocatedSize());
1101  for(Index j=1; j<=m_outerSize; ++j)
1102  m_outerIndex[j] = end;
1103  }
1104  }
1105 
1106  // check whether we can do a fast "push back" insertion
1107  Index data_end = m_data.allocatedSize();
1108 
1109  // First case: we are filling a new inner vector which is packed at the end.
1110  // We assume that all remaining inner-vectors are also empty and packed to the end.
1111  if(m_outerIndex[outer]==data_end)
1112  {
1113  eigen_internal_assert(m_innerNonZeros[outer]==0);
1114 
1115  // pack previous empty inner-vectors to end of the used-space
1116  // and allocate the entire free-space to the current inner-vector.
1117  StorageIndex p = convert_index(m_data.size());
1118  Index j = outer;
1119  while(j>=0 && m_innerNonZeros[j]==0)
1120  m_outerIndex[j--] = p;
1121 
1122  // push back the new element
1123  ++m_innerNonZeros[outer];
1124  m_data.append(Scalar(0), inner);
1125 
1126  // check for reallocation
1127  if(data_end != m_data.allocatedSize())
1128  {
1129  // m_data has been reallocated
1130  // -> move remaining inner-vectors back to the end of the free-space
1131  // so that the entire free-space is allocated to the current inner-vector.
1132  eigen_internal_assert(data_end < m_data.allocatedSize());
1133  StorageIndex new_end = convert_index(m_data.allocatedSize());
1134  for(Index k=outer+1; k<=m_outerSize; ++k)
1135  if(m_outerIndex[k]==data_end)
1136  m_outerIndex[k] = new_end;
1137  }
1138  return m_data.value(p);
1139  }
1140 
1141  // Second case: the next inner-vector is packed to the end
1142  // and the end of the current inner-vector matches the used-space.
1143  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1144  {
1145  eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1146 
1147  // add space for the new element
1148  ++m_innerNonZeros[outer];
1149  m_data.resize(m_data.size()+1);
1150 
1151  // check for reallocation
1152  if(data_end != m_data.allocatedSize())
1153  {
1154  // m_data has been reallocated
1155  // -> move remaining inner-vectors back to the end of the free-space
1156  // so that the entire free-space is allocated to the current inner-vector.
1157  eigen_internal_assert(data_end < m_data.allocatedSize());
1158  StorageIndex new_end = convert_index(m_data.allocatedSize());
1159  for(Index k=outer+1; k<=m_outerSize; ++k)
1160  if(m_outerIndex[k]==data_end)
1161  m_outerIndex[k] = new_end;
1162  }
1163 
1164  // and insert it at the right position (sorted insertion)
1165  Index startId = m_outerIndex[outer];
1166  Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1167  while ( (p > startId) && (m_data.index(p-1) > inner) )
1168  {
1169  m_data.index(p) = m_data.index(p-1);
1170  m_data.value(p) = m_data.value(p-1);
1171  --p;
1172  }
1173 
1174  m_data.index(p) = convert_index(inner);
1175  return (m_data.value(p) = 0);
1176  }
1177 
1178  if(m_data.size() != m_data.allocatedSize())
1179  {
1180  // make sure the matrix is compatible to random un-compressed insertion:
1181  m_data.resize(m_data.allocatedSize());
1182  this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(2*m_outerSize, convert_index(m_outerSize)));
1183  }
1184 
1185  return insertUncompressed(row,col);
1186 }
1187 
1188 template<typename _Scalar, int _Options, typename _Index>
1189 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
1190 {
1191  eigen_assert(!isCompressed());
1192 
1193  const Index outer = IsRowMajor ? row : col;
1194  const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1195 
1196  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1197  StorageIndex innerNNZ = m_innerNonZeros[outer];
1198  if(innerNNZ>=room)
1199  {
1200  // this inner vector is full, we need to reallocate the whole buffer :(
1201  reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1202  }
1203 
1204  Index startId = m_outerIndex[outer];
1205  Index p = startId + m_innerNonZeros[outer];
1206  while ( (p > startId) && (m_data.index(p-1) > inner) )
1207  {
1208  m_data.index(p) = m_data.index(p-1);
1209  m_data.value(p) = m_data.value(p-1);
1210  --p;
1211  }
1212  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1213 
1214  m_innerNonZeros[outer]++;
1215 
1216  m_data.index(p) = inner;
1217  return (m_data.value(p) = 0);
1218 }
1219 
1220 template<typename _Scalar, int _Options, typename _Index>
1221 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
1222 {
1223  eigen_assert(isCompressed());
1224 
1225  const Index outer = IsRowMajor ? row : col;
1226  const Index inner = IsRowMajor ? col : row;
1227 
1228  Index previousOuter = outer;
1229  if (m_outerIndex[outer+1]==0)
1230  {
1231  // we start a new inner vector
1232  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1233  {
1234  m_outerIndex[previousOuter] = convert_index(m_data.size());
1235  --previousOuter;
1236  }
1237  m_outerIndex[outer+1] = m_outerIndex[outer];
1238  }
1239 
1240  // here we have to handle the tricky case where the outerIndex array
1241  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1242  // the 2nd inner vector...
1243  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1244  && (size_t(m_outerIndex[outer+1]) == m_data.size());
1245 
1246  size_t startId = m_outerIndex[outer];
1247  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
1248  size_t p = m_outerIndex[outer+1];
1249  ++m_outerIndex[outer+1];
1250 
1251  double reallocRatio = 1;
1252  if (m_data.allocatedSize()<=m_data.size())
1253  {
1254  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1255  if (m_data.size()==0)
1256  {
1257  m_data.reserve(32);
1258  }
1259  else
1260  {
1261  // we need to reallocate the data, to reduce multiple reallocations
1262  // we use a smart resize algorithm based on the current filling ratio
1263  // in addition, we use double to avoid integer overflows
1264  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1265  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1266  // furthermore we bound the realloc ratio to:
1267  // 1) reduce repeated small reallocations when the matrix is almost filled
1268  // 2) avoid allocating too much memory when the matrix is almost empty
1269  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1270  }
1271  }
1272  m_data.resize(m_data.size()+1,reallocRatio);
1273 
1274  if (!isLastVec)
1275  {
1276  if (previousOuter==-1)
1277  {
1278  // oops wrong guess.
1279  // let's correct the outer offsets
1280  for (Index k=0; k<=(outer+1); ++k)
1281  m_outerIndex[k] = 0;
1282  Index k=outer+1;
1283  while(m_outerIndex[k]==0)
1284  m_outerIndex[k++] = 1;
1285  while (k<=m_outerSize && m_outerIndex[k]!=0)
1286  m_outerIndex[k++]++;
1287  p = 0;
1288  --k;
1289  k = m_outerIndex[k]-1;
1290  while (k>0)
1291  {
1292  m_data.index(k) = m_data.index(k-1);
1293  m_data.value(k) = m_data.value(k-1);
1294  k--;
1295  }
1296  }
1297  else
1298  {
1299  // we are not inserting into the last inner vec
1300  // update outer indices:
1301  Index j = outer+2;
1302  while (j<=m_outerSize && m_outerIndex[j]!=0)
1303  m_outerIndex[j++]++;
1304  --j;
1305  // shift data of last vecs:
1306  Index k = m_outerIndex[j]-1;
1307  while (k>=Index(p))
1308  {
1309  m_data.index(k) = m_data.index(k-1);
1310  m_data.value(k) = m_data.value(k-1);
1311  k--;
1312  }
1313  }
1314  }
1315 
1316  while ( (p > startId) && (m_data.index(p-1) > inner) )
1317  {
1318  m_data.index(p) = m_data.index(p-1);
1319  m_data.value(p) = m_data.value(p-1);
1320  --p;
1321  }
1322 
1323  m_data.index(p) = inner;
1324  return (m_data.value(p) = 0);
1325 }
1326 
1327 namespace internal {
1328 
1329 template<typename _Scalar, int _Options, typename _Index>
1330 struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
1331  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > >
1332 {
1333  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > > Base;
1334  typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
1335  evaluator() : Base() {}
1336  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1337 };
1338 
1339 }
1340 
1341 } // end namespace Eigen
1342 
1343 #endif // EIGEN_SPARSEMATRIX_H