#ifndef VIENNACL_COMPRESSED_MATRIX_HPP_
#define VIENNACL_COMPRESSED_MATRIX_HPP_
// ... (license header and includes elided; everything below lives in namespace viennacl)

/** @brief Implementation of the host-to-device copy: assembles the CSR arrays on the host, then uploads them. */
template<typename CPU_MATRIX, typename SCALARTYPE, unsigned int ALIGNMENT>
void copy_impl(const CPU_MATRIX & cpu_matrix,
               compressed_matrix<SCALARTYPE, ALIGNMENT> & gpu_matrix,
               vcl_size_t nonzeros)
{
  // host-side staging arrays for the CSR data:
  // ... (row_buffer and col_buffer set up as typesafe host arrays)
  std::vector<SCALARTYPE> elements(nonzeros);

  vcl_size_t row_index  = 0;
  vcl_size_t data_index = 0;

  for (typename CPU_MATRIX::const_iterator1 row_it = cpu_matrix.begin1();
       row_it != cpu_matrix.end1();
       ++row_it)
  {
    row_buffer.set(row_index, data_index);   // record where this row starts
    ++row_index;

    for (typename CPU_MATRIX::const_iterator2 col_it = row_it.begin();
         col_it != row_it.end();
         ++col_it)
    {
      col_buffer.set(data_index, col_it.index2());
      elements[data_index] = *col_it;
      ++data_index;
    }
    data_index = viennacl::tools::align_to_multiple<vcl_size_t>(data_index, ALIGNMENT);   // pad each row to the alignment
  }
  row_buffer.set(row_index, data_index);   // terminating entry: one past the last nonzero

  gpu_matrix.set(row_buffer.get(),
                 col_buffer.get(),
                 &elements[0],
                 cpu_matrix.size1(),
                 cpu_matrix.size2(),
                 nonzeros);
}
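// Worked example (illustrative, not part of the original source) of the CSR
// layout assembled above, assuming ALIGNMENT == 1:
//
//       [ 1 0 2 0 ]          row_buffer = { 0, 2, 3, 5 }    (size1 + 1 entries)
//   A = [ 0 0 3 0 ]   ==>    col_buffer = { 0, 2, 2, 1, 3 }
//       [ 0 4 0 5 ]          elements   = { 1, 2, 3, 4, 5 }
//
// For ALIGNMENT > 1, data_index is advanced after each row so that every row
// occupies a multiple of ALIGNMENT entries (padding with unused slots).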
/** @brief Copies a sparse host matrix to the device. A first pass determines the number of entries, including per-row alignment padding. */
template<typename CPU_MATRIX, typename SCALARTYPE, unsigned int ALIGNMENT>
void copy(const CPU_MATRIX & cpu_matrix,
          compressed_matrix<SCALARTYPE, ALIGNMENT> & gpu_matrix)
{
  if ( cpu_matrix.size1() > 0 && cpu_matrix.size2() > 0 )
  {
    // first pass: count entries, padding each row to a multiple of ALIGNMENT
    vcl_size_t num_entries = 0;
    for (typename CPU_MATRIX::const_iterator1 row_it = cpu_matrix.begin1();
         row_it != cpu_matrix.end1();
         ++row_it)
    {
      vcl_size_t entries_per_row = 0;
      for (typename CPU_MATRIX::const_iterator2 col_it = row_it.begin();
           col_it != row_it.end();
           ++col_it)
        ++entries_per_row;

      num_entries += viennacl::tools::align_to_multiple<vcl_size_t>(entries_per_row, ALIGNMENT);
    }

    if (num_entries == 0)   // copying an empty matrix still allocates one entry
      num_entries = 1;

    // second pass: assemble and upload
    // ...
  }
}
/** @brief Copies a sparse matrix in STL format (one std::map per row) to the device. */
template<typename SizeType, typename SCALARTYPE, unsigned int ALIGNMENT>
void copy(const std::vector< std::map<SizeType, SCALARTYPE> > & cpu_matrix,
          compressed_matrix<SCALARTYPE, ALIGNMENT> & gpu_matrix)
{
  vcl_size_t nonzeros = 0;
  vcl_size_t max_col  = 0;
  for (vcl_size_t i=0; i<cpu_matrix.size(); ++i)
  {
    if (cpu_matrix[i].size() > 0)
      nonzeros += ((cpu_matrix[i].size() - 1) / ALIGNMENT + 1) * ALIGNMENT;   // round the row's entry count up to the alignment
    if (cpu_matrix[i].size() > 0)
      max_col = std::max<vcl_size_t>(max_col, (cpu_matrix[i].rbegin())->first);   // largest column index in this row
  }
  // ... (wrap in an adapter and forward to the generic copy)
}
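// Usage sketch (illustrative, not part of the original header): assembling a
// matrix on the host in the STL format above, then uploading it:
//
//   std::vector< std::map<unsigned int, float> > host_A(4);   // 4 rows
//   host_A[0][0] = 1.0f;  host_A[1][2] = 2.5f;  host_A[3][3] = 4.0f;
//   viennacl::compressed_matrix<float> device_A;
//   viennacl::copy(host_A, device_A);   // builds the padded CSR arrays and uploads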
#ifdef VIENNACL_WITH_UBLAS
/** @brief Copies a uBLAS compressed matrix to the device; the CSR arrays can be transferred directly. */
template<typename ScalarType, typename F, vcl_size_t IB, typename IA, typename TA>
void copy(const boost::numeric::ublas::compressed_matrix<ScalarType, F, IB, IA, TA> & ublas_matrix,
          viennacl::compressed_matrix<ScalarType, 1> & gpu_matrix)
{
  // ... (set up host staging arrays row_buffer and col_buffer)
  for (vcl_size_t i=0; i<=ublas_matrix.size1(); ++i)
    row_buffer.set(i, ublas_matrix.index1_data()[i]);

  for (vcl_size_t i=0; i<ublas_matrix.nnz(); ++i)
    col_buffer.set(i, ublas_matrix.index2_data()[i]);

  gpu_matrix.set(row_buffer.get(),
                 col_buffer.get(),
                 &(ublas_matrix.value_data()[0]),
                 ublas_matrix.size1(),
                 ublas_matrix.size2(),
                 ublas_matrix.nnz());
}
#endif
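// Usage sketch (illustrative, requires VIENNACL_WITH_UBLAS): since uBLAS
// already stores CSR, no host-side conversion pass is needed:
//
//   boost::numeric::ublas::compressed_matrix<float> ublas_A(3, 3);
//   ublas_A(0, 0) = 1.0f;  ublas_A(1, 2) = 2.0f;
//   viennacl::compressed_matrix<float> gpu_A;
//   viennacl::copy(ublas_A, gpu_A);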
#ifdef VIENNACL_WITH_EIGEN
/** @brief Copies an Eigen sparse matrix to the device via an intermediate STL representation. */
template<typename SCALARTYPE, int flags, unsigned int ALIGNMENT>
void copy(const Eigen::SparseMatrix<SCALARTYPE, flags> & eigen_matrix,
          compressed_matrix<SCALARTYPE, ALIGNMENT> & gpu_matrix)
{
  assert( (gpu_matrix.size1() == 0 || static_cast<vcl_size_t>(eigen_matrix.rows()) == gpu_matrix.size1()) && bool("Size mismatch") );
  assert( (gpu_matrix.size2() == 0 || static_cast<vcl_size_t>(eigen_matrix.cols()) == gpu_matrix.size2()) && bool("Size mismatch") );

  std::vector< std::map<unsigned int, SCALARTYPE> > stl_matrix(eigen_matrix.rows());

  for (int k=0; k < eigen_matrix.outerSize(); ++k)
    for (typename Eigen::SparseMatrix<SCALARTYPE, flags>::InnerIterator it(eigen_matrix, k); it; ++it)
      stl_matrix[it.row()][it.col()] = it.value();

  copy(tools::const_sparse_matrix_adapter<SCALARTYPE>(stl_matrix, eigen_matrix.rows(), eigen_matrix.cols()), gpu_matrix);
}
#endif
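// Usage sketch (illustrative, requires VIENNACL_WITH_EIGEN):
//
//   Eigen::SparseMatrix<float, Eigen::RowMajor> eigen_A(3, 4);
//   eigen_A.insert(0, 0) = 1.0f;
//   eigen_A.insert(2, 3) = 2.0f;
//   viennacl::compressed_matrix<float> gpu_A;
//   viennacl::copy(eigen_A, gpu_A);   // converted via the STL staging matrix above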
#ifdef VIENNACL_WITH_MTL4
/** @brief Copies an MTL4 compressed2D matrix to the device via an intermediate STL representation. */
template<typename SCALARTYPE, unsigned int ALIGNMENT>
void copy(const mtl::compressed2D<SCALARTYPE> & cpu_matrix,
          compressed_matrix<SCALARTYPE, ALIGNMENT> & gpu_matrix)
{
  assert( (gpu_matrix.size1() == 0 || static_cast<vcl_size_t>(cpu_matrix.num_rows()) == gpu_matrix.size1()) && bool("Size mismatch") );
  assert( (gpu_matrix.size2() == 0 || static_cast<vcl_size_t>(cpu_matrix.num_cols()) == gpu_matrix.size2()) && bool("Size mismatch") );

  typedef mtl::compressed2D<SCALARTYPE> MatrixType;

  std::vector< std::map<unsigned int, SCALARTYPE> > stl_matrix(cpu_matrix.num_rows());

  using mtl::traits::range_generator;
  using mtl::traits::range::min;

  // choose the cheaper of row-wise and column-wise traversal:
  typedef typename min<range_generator<mtl::tag::row, MatrixType>,
                       range_generator<mtl::tag::col, MatrixType> >::type   range_type;
  range_type my_range;

  // type of the outer cursor:
  typedef typename range_type::type                                         c_type;
  // type of the inner (nonzero) cursor:
  typedef typename mtl::traits::range_generator<mtl::tag::nz, c_type>::type ic_type;

  // property maps for row index, column index and value:
  typename mtl::traits::row<MatrixType>::type         row(cpu_matrix);
  typename mtl::traits::col<MatrixType>::type         col(cpu_matrix);
  typename mtl::traits::const_value<MatrixType>::type value(cpu_matrix);

  for (c_type cursor(my_range.begin(cpu_matrix)), cend(my_range.end(cpu_matrix)); cursor != cend; ++cursor)
    for (ic_type icursor(mtl::begin<mtl::tag::nz>(cursor)), icend(mtl::end<mtl::tag::nz>(cursor)); icursor != icend; ++icursor)
      stl_matrix[row(*icursor)][col(*icursor)] = value(*icursor);

  copy(tools::const_sparse_matrix_adapter<SCALARTYPE>(stl_matrix, cpu_matrix.num_rows(), cpu_matrix.num_cols()), gpu_matrix);
}
#endif
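// Usage sketch (illustrative, requires VIENNACL_WITH_MTL4). MTL4 matrices are
// filled through an inserter, which must go out of scope before the matrix is read:
//
//   mtl::compressed2D<double> mtl_A(3, 3);
//   {
//     mtl::matrix::inserter< mtl::compressed2D<double> > ins(mtl_A);
//     ins(0, 0) << 1.0;
//     ins(2, 1) << 2.0;
//   }
//   viennacl::compressed_matrix<double> gpu_A;
//   viennacl::copy(mtl_A, gpu_A);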
/** @brief Copies the device matrix back to any host matrix type providing operator()(i, j) write access. */
template<typename CPU_MATRIX, typename SCALARTYPE, unsigned int ALIGNMENT>
void copy(const compressed_matrix<SCALARTYPE, ALIGNMENT> & gpu_matrix,
          CPU_MATRIX & cpu_matrix )
{
  // ...
  if ( gpu_matrix.size1() > 0 && gpu_matrix.size2() > 0 )
  {
    // read the CSR arrays back into host staging buffers:
    // ... (row_buffer and col_buffer)
    std::vector<SCALARTYPE> elements(gpu_matrix.nnz());
    // ... (memory_read calls)

    vcl_size_t data_index = 0;
    for (vcl_size_t row = 1; row <= gpu_matrix.size1(); ++row)
    {
      while (data_index < row_buffer[row])
      {
        if (col_buffer[data_index] >= gpu_matrix.size2())
        {
          std::cerr << "ViennaCL encountered invalid data at colbuffer[" << data_index << "]: " << col_buffer[data_index] << std::endl;
          return;
        }

        if (elements[data_index] != static_cast<SCALARTYPE>(0.0))
          cpu_matrix(row-1, static_cast<vcl_size_t>(col_buffer[data_index])) = elements[data_index];   // row-1: 'row' runs from 1 to size1
        ++data_index;
      }
    }
  }
}
/** @brief Copies the device matrix to the STL format (one std::map per row) on the host. */
template<typename SCALARTYPE, unsigned int ALIGNMENT>
void copy(const compressed_matrix<SCALARTYPE, ALIGNMENT> & gpu_matrix,
          std::vector< std::map<unsigned int, SCALARTYPE> > & cpu_matrix)
{
  // wrap the STL structure in an adapter providing operator()(i, j):
  // ... (tools::sparse_matrix_adapter<SCALARTYPE> temp(...))
  copy(gpu_matrix, temp);
}
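// Usage sketch (illustrative, not part of the original header): a round trip
// through the overloads above.
//
//   std::vector< std::map<unsigned int, float> > host_A(4), host_B;
//   host_A[1][3] = 2.0f;
//   viennacl::compressed_matrix<float> gpu_A;
//   viennacl::copy(host_A, gpu_A);          // host -> device
//   host_B.resize(gpu_A.size1());
//   viennacl::copy(gpu_A, host_B);          // device -> host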
#ifdef VIENNACL_WITH_UBLAS
/** @brief Copies the device matrix to a uBLAS compressed matrix; the CSR arrays are written directly. */
template<typename ScalarType, unsigned int ALIGNMENT, typename F, vcl_size_t IB, typename IA, typename TA>
void copy(viennacl::compressed_matrix<ScalarType, ALIGNMENT> const & gpu_matrix,
          boost::numeric::ublas::compressed_matrix<ScalarType> & ublas_matrix)
{
  // ... (read row_buffer and col_buffer from the device)
  ublas_matrix.clear();
  ublas_matrix.reserve(gpu_matrix.nnz());

  ublas_matrix.set_filled(gpu_matrix.size1() + 1, gpu_matrix.nnz());

  for (vcl_size_t i=0; i<ublas_matrix.size1() + 1; ++i)
    ublas_matrix.index1_data()[i] = row_buffer[i];

  for (vcl_size_t i=0; i<ublas_matrix.nnz(); ++i)
    ublas_matrix.index2_data()[i] = col_buffer[i];

  // ... (read the values directly into value_data())
}
#endif
#ifdef VIENNACL_WITH_EIGEN
/** @brief Copies the device matrix to an Eigen sparse matrix of matching dimensions. */
template<typename SCALARTYPE, int flags, unsigned int ALIGNMENT>
void copy(compressed_matrix<SCALARTYPE, ALIGNMENT> & gpu_matrix,
          Eigen::SparseMatrix<SCALARTYPE, flags> & eigen_matrix)
{
  assert( (static_cast<vcl_size_t>(eigen_matrix.rows()) == gpu_matrix.size1()) && bool("Size mismatch") );
  assert( (static_cast<vcl_size_t>(eigen_matrix.cols()) == gpu_matrix.size2()) && bool("Size mismatch") );

  if ( gpu_matrix.size1() > 0 && gpu_matrix.size2() > 0 )
  {
    // read the CSR arrays back into host staging buffers:
    // ... (row_buffer and col_buffer)
    std::vector<SCALARTYPE> elements(gpu_matrix.nnz());
    // ... (memory_read calls)

    eigen_matrix.setZero();

    vcl_size_t data_index = 0;
    for (vcl_size_t row = 1; row <= gpu_matrix.size1(); ++row)
    {
      while (data_index < row_buffer[row])
      {
        assert(col_buffer[data_index] < gpu_matrix.size2() && bool("ViennaCL encountered invalid data at col_buffer"));
        if (elements[data_index] != static_cast<SCALARTYPE>(0.0))
          eigen_matrix.insert(row-1, col_buffer[data_index]) = elements[data_index];
        ++data_index;
      }
    }
  }
}
#endif
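// Usage sketch (illustrative, requires VIENNACL_WITH_EIGEN): the Eigen matrix
// must already have matching dimensions (see the asserts above). Since the
// copy uses insert(), compacting afterwards may be worthwhile:
//
//   Eigen::SparseMatrix<float, Eigen::RowMajor> eigen_A(gpu_A.size1(), gpu_A.size2());
//   viennacl::copy(gpu_A, eigen_A);
//   eigen_A.makeCompressed();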
#ifdef VIENNACL_WITH_MTL4
/** @brief Copies the device matrix to an MTL4 compressed2D matrix of matching dimensions. */
template<typename SCALARTYPE, unsigned int ALIGNMENT>
void copy(compressed_matrix<SCALARTYPE, ALIGNMENT> & gpu_matrix,
          mtl::compressed2D<SCALARTYPE> & mtl4_matrix)
{
  assert( (static_cast<vcl_size_t>(mtl4_matrix.num_rows()) == gpu_matrix.size1()) && bool("Size mismatch") );
  assert( (static_cast<vcl_size_t>(mtl4_matrix.num_cols()) == gpu_matrix.size2()) && bool("Size mismatch") );

  if ( gpu_matrix.size1() > 0 && gpu_matrix.size2() > 0 )
  {
    // read the CSR arrays back into host staging buffers:
    // ... (row_buffer and col_buffer)
    std::vector<SCALARTYPE> elements(gpu_matrix.nnz());
    // ... (memory_read calls)

    mtl::matrix::inserter< mtl::compressed2D<SCALARTYPE> > ins(mtl4_matrix);

    vcl_size_t data_index = 0;
    for (vcl_size_t row = 1; row <= gpu_matrix.size1(); ++row)
    {
      while (data_index < row_buffer[row])
      {
        assert(col_buffer[data_index] < gpu_matrix.size2() && bool("ViennaCL encountered invalid data at col_buffer"));
        if (elements[data_index] != static_cast<SCALARTYPE>(0.0))
          ins(row-1, col_buffer[data_index]) << typename mtl::Collection< mtl::compressed2D<SCALARTYPE> >::value_type(elements[data_index]);
        ++data_index;
      }
    }
  }
}
#endif
/** @brief A sparse square matrix in compressed sparse row (CSR) format. */
template<class SCALARTYPE, unsigned int ALIGNMENT>
class compressed_matrix
{
public:
  // ... (typedefs and default constructor elided)

  /** @brief Construction with the supplied dimensions. If the number of nonzeros is positive, memory is allocated immediately. */
  explicit compressed_matrix(vcl_size_t rows, vcl_size_t cols, vcl_size_t nonzeros = 0, viennacl::context ctx = viennacl::context())
    : rows_(rows), cols_(cols), nonzeros_(nonzeros)
  {
    // ...
#ifdef VIENNACL_WITH_OPENCL
    if (ctx.memory_type() == OPENCL_MEMORY)
    {
      row_buffer_.opencl_handle().context(ctx.opencl_context());
      col_buffer_.opencl_handle().context(ctx.opencl_context());
      elements_.opencl_handle().context(ctx.opencl_context());
    }
#endif
    // ... (buffer allocation elided)
  }

  /** @brief Construction with the supplied dimensions in the given context; no nonzeros are allocated yet. */
  explicit compressed_matrix(vcl_size_t rows, vcl_size_t cols, viennacl::context ctx)
    : rows_(rows), cols_(cols), nonzeros_(0)
  {
    // ...
#ifdef VIENNACL_WITH_OPENCL
    if (ctx.memory_type() == OPENCL_MEMORY)
    {
      row_buffer_.opencl_handle().context(ctx.opencl_context());
      col_buffer_.opencl_handle().context(ctx.opencl_context());
      elements_.opencl_handle().context(ctx.opencl_context());
    }
#endif
    // ...
  }

  explicit compressed_matrix(viennacl::context ctx)
    : rows_(0), cols_(0), nonzeros_(0)
  {
    // ...
#ifdef VIENNACL_WITH_OPENCL
    if (ctx.memory_type() == OPENCL_MEMORY)
    {
      row_buffer_.opencl_handle().context(ctx.opencl_context());
      col_buffer_.opencl_handle().context(ctx.opencl_context());
      elements_.opencl_handle().context(ctx.opencl_context());
    }
#endif
  }

#ifdef VIENNACL_WITH_OPENCL
  /** @brief Wraps existing OpenCL buffers holding valid CSR data; no data is copied. */
  explicit compressed_matrix(cl_mem mem_row_buffer, cl_mem mem_col_buffer, cl_mem mem_elements,
                             vcl_size_t rows, vcl_size_t cols, vcl_size_t nonzeros) :
    rows_(rows), cols_(cols), nonzeros_(nonzeros)
  {
    // ...
    row_buffer_.opencl_handle() = mem_row_buffer;
    row_buffer_.opencl_handle().inc();               // prevents deletion of user-provided memory
    row_buffer_.raw_size(sizeof(cl_uint) * (rows + 1));

    col_buffer_.opencl_handle() = mem_col_buffer;
    col_buffer_.opencl_handle().inc();               // prevents deletion of user-provided memory
    col_buffer_.raw_size(sizeof(cl_uint) * nonzeros);

    elements_.opencl_handle() = mem_elements;
    elements_.opencl_handle().inc();                 // prevents deletion of user-provided memory
    elements_.raw_size(sizeof(SCALARTYPE) * nonzeros);
  }
#endif
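  // Usage sketch (illustrative, not part of the original header): wrapping
  // existing OpenCL buffers without copying. 'row_mem', 'col_mem' and
  // 'val_mem' are hypothetical cl_mem handles already holding valid CSR data:
  //
  //   viennacl::compressed_matrix<float> A(row_mem, col_mem, val_mem,
  //                                        rows, cols, nnz);
  //   // inc() is called on each handle above, so the caller may release
  //   // its own references afterwards.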
  /** @brief Assigns a compressed matrix, possibly from another memory domain. */
  compressed_matrix & operator=(compressed_matrix const & other)
  {
    assert( (rows_ == 0 || rows_ == other.size1()) && bool("Size mismatch") );
    assert( (cols_ == 0 || cols_ == other.size2()) && bool("Size mismatch") );

    rows_     = other.size1();
    cols_     = other.size2();
    nonzeros_ = other.nnz();

    viennacl::backend::typesafe_memory_copy<unsigned int>(other.row_buffer_, row_buffer_);
    viennacl::backend::typesafe_memory_copy<unsigned int>(other.col_buffer_, col_buffer_);
    viennacl::backend::typesafe_memory_copy<SCALARTYPE>(other.elements_, elements_);

    return *this;
  }
  /** @brief Sets the row, column and value arrays of the compressed matrix. The index arrays are passed as void pointers because their element type depends on the active backend. */
  void set(const void * row_jumper,
           const void * col_buffer,
           const SCALARTYPE * elements,
           vcl_size_t rows,
           vcl_size_t cols,
           vcl_size_t nonzeros)
  {
    assert( (rows > 0)     && bool("Error in compressed_matrix::set(): Number of rows must be larger than zero!"));
    assert( (cols > 0)     && bool("Error in compressed_matrix::set(): Number of columns must be larger than zero!"));
    assert( (nonzeros > 0) && bool("Error in compressed_matrix::set(): Number of nonzeros must be larger than zero!"));

    // ... (create the backend buffers and copy the supplied arrays over)
    rows_     = rows;
    cols_     = cols;
    nonzeros_ = nonzeros;
  }
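  // Usage sketch (illustrative): uploading raw CSR arrays directly, assuming
  // the backend's index type is unsigned int (cl_uint for the OpenCL backend):
  //
  //   unsigned int rows[3] = {0, 2, 3};      // 2x3 matrix, 3 nonzeros
  //   unsigned int cols[3] = {0, 2, 1};
  //   float        vals[3] = {1.0f, 2.0f, 3.0f};
  //   A.set(rows, cols, vals, 2, 3, 3);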
  /** @brief Allocates memory for the supplied number of nonzeros; old values are preserved. */
  void reserve(vcl_size_t new_nonzeros)
  {
    if (new_nonzeros > nonzeros_)
    {
      // ... (allocate larger buffers and copy the existing entries over)
      nonzeros_ = new_nonzeros;
    }
  }
  /** @brief Resizes the matrix. Entries outside the new bounds are dropped; the rest can be preserved. */
  void resize(vcl_size_t new_size1, vcl_size_t new_size2, bool preserve = true)
  {
    assert(new_size1 > 0 && new_size2 > 0 && bool("Cannot resize to zero size!"));

    if (new_size1 != rows_ || new_size2 != cols_)
    {
      // rebuild via a host-side STL representation:
      std::vector<std::map<unsigned int, SCALARTYPE> > stl_sparse_matrix;
      if (rows_ > 0)
      {
        stl_sparse_matrix.resize(rows_);
        if (preserve)
        {
          // ... (download the current entries into stl_sparse_matrix)
        }
        else
          stl_sparse_matrix[0][0] = 0;   // enforces nonzero buffer sizes
      }
      else
      {
        stl_sparse_matrix.resize(new_size1);
        stl_sparse_matrix[0][0] = 0;     // enforces nonzero buffer sizes
      }

      stl_sparse_matrix.resize(new_size1);

      // discard entries whose column index exceeds the new column count:
      if (new_size2 < cols_ && rows_ > 0)
      {
        for (vcl_size_t i=0; i<stl_sparse_matrix.size(); ++i)
        {
          std::list<unsigned int> to_delete;
          for (typename std::map<unsigned int, SCALARTYPE>::iterator it = stl_sparse_matrix[i].begin();
               it != stl_sparse_matrix[i].end();
               ++it)
          {
            if (it->first >= new_size2)
              to_delete.push_back(it->first);
          }

          for (std::list<unsigned int>::iterator it = to_delete.begin(); it != to_delete.end(); ++it)
            stl_sparse_matrix[i].erase(*it);
        }
      }

      // ... (upload the rebuilt matrix and update rows_ and cols_)
    }
  }
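  // Usage sketch (illustrative): shrinking discards entries whose indices no
  // longer fit; enlarging keeps all existing entries. Note the host round-trip
  // above makes resize() comparatively expensive:
  //
  //   viennacl::compressed_matrix<float> A(100, 100);
  //   A.resize(50, 50, true);   // preserve == true: surviving entries are kept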
  /** @brief Returns a proxy to the (i,j)-th entry. If the entry is structurally zero, it is inserted first (expensive!). */
  entry_proxy<SCALARTYPE> operator()(vcl_size_t i, vcl_size_t j)
  {
    assert( (i < rows_) && (j < cols_) && bool("compressed_matrix access out of bounds!"));

    vcl_size_t index = element_index(i, j);

    // entry already exists, so no structural change is required:
    if (index < nonzeros_)
    {
      // ... (return a proxy referring to elements_[index])
    }

    // entry is not present yet: insert it via a host round-trip
    std::vector< std::map<unsigned int, SCALARTYPE> > cpu_backup(rows_);
    // ... (download the matrix into cpu_backup)
    cpu_backup[i][static_cast<unsigned int>(j)] = 0.0;
    // ... (upload again)
    index = element_index(i, j);

    assert(index < nonzeros_);
    // ...
  }
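  // Usage sketch (illustrative): writing through the returned proxy. Inserting
  // a previously-zero entry triggers the full host round-trip above, so prefer
  // assembling the matrix on the host in bulk:
  //
  //   A(2, 3) = 7.0f;   // slow if entry (2,3) did not exist yet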
  void switch_memory_context(viennacl::context new_ctx)
  {
    viennacl::backend::switch_memory_context<unsigned int>(row_buffer_, new_ctx);
    viennacl::backend::switch_memory_context<unsigned int>(col_buffer_, new_ctx);
    viennacl::backend::switch_memory_context<SCALARTYPE>(elements_, new_ctx);
  }
  /** @brief Returns the flat index of entry (i, j) within the nonzero arrays; an out-of-range value signals that the entry does not exist. */
  vcl_size_t element_index(vcl_size_t i, vcl_size_t j)
  {
    // ... (read row_indices for row i, then the column indices of that row)
    viennacl::backend::memory_read(col_buffer_, col_indices.element_size()*row_indices[0], row_indices.element_size()*col_indices.size(), col_indices.get());

    for (vcl_size_t k=0; k<col_indices.size(); ++k)
    {
      if (col_indices[k] == j)
        return row_indices[0] + k;
    }

    // not found: return an out-of-range index
    return nonzeros_;
  }
// ... (data members and end of class compressed_matrix)

// The following op_executor specializations dispatch vector expressions
// involving sparse matrix-vector products:

template<typename T, unsigned int A>
struct op_executor<vector_base<T>, op_assign, vector_expression<const compressed_matrix<T, A>, const vector_base<T>, op_prod> >
{
  static void apply(vector_base<T> & lhs, vector_expression<const compressed_matrix<T, A>, const vector_base<T>, op_prod> const & rhs)
  {
    // ... (compute lhs = A * x via prod_impl, guarding against aliasing of lhs and x)
  }
};

template<typename T, unsigned int A>
struct op_executor<vector_base<T>, op_inplace_add, vector_expression<const compressed_matrix<T, A>, const vector_base<T>, op_prod> >
{
  static void apply(vector_base<T> & lhs, vector_expression<const compressed_matrix<T, A>, const vector_base<T>, op_prod> const & rhs)
  {
    // ... (lhs += A * x via a temporary)
  }
};

template<typename T, unsigned int A>
struct op_executor<vector_base<T>, op_inplace_sub, vector_expression<const compressed_matrix<T, A>, const vector_base<T>, op_prod> >
{
  static void apply(vector_base<T> & lhs, vector_expression<const compressed_matrix<T, A>, const vector_base<T>, op_prod> const & rhs)
  {
    // ... (lhs -= A * x via a temporary)
  }
};

// Variants where the right-hand side vector is itself an expression, e.g. lhs = A * (x + y):

template<typename T, unsigned int A, typename LHS, typename RHS, typename OP>
struct op_executor<vector_base<T>, op_assign, vector_expression<const compressed_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> >
{
  static void apply(vector_base<T> & lhs, vector_expression<const compressed_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> const & rhs)
  {
    // ... (evaluate the vector expression into a temporary, then multiply)
  }
};

template<typename T, unsigned int A, typename LHS, typename RHS, typename OP>
struct op_executor<vector_base<T>, op_inplace_add, vector_expression<const compressed_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> >
{
  static void apply(vector_base<T> & lhs, vector_expression<const compressed_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> const & rhs)
  {
    // ...
  }
};

template<typename T, unsigned int A, typename LHS, typename RHS, typename OP>
struct op_executor<vector_base<T>, op_inplace_sub, vector_expression<const compressed_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> >
{
  static void apply(vector_base<T> & lhs, vector_expression<const compressed_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> const & rhs)
  {
    // ...
  }
};
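// Usage sketch (illustrative, not part of the original header): expressions
// that are dispatched through the op_executor specializations above.
// viennacl::linalg::prod() builds a vector_expression; the subsequent
// assignment operator selects the matching specialization:
//
//   viennacl::compressed_matrix<float> A(n, n);
//   viennacl::vector<float> x(n), y(n);
//   y  = viennacl::linalg::prod(A, x);   // op_assign specialization
//   y += viennacl::linalg::prod(A, x);   // op_inplace_add specialization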