Tensor.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_H

namespace Eigen {

/** \class Tensor
  * \ingroup CXX11_Tensor_Module
  * \brief The tensor class.
  */
template<typename Scalar_, std::size_t NumIndices_, int Options_, typename IndexType_>
class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
  public:
    typedef Tensor<Scalar_, NumIndices_, Options_, IndexType_> Self;
    typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef typename internal::packet_traits<Scalar>::type Packet;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef typename Base::CoeffReturnType CoeffReturnType;
    typedef typename Base::PacketReturnType PacketReturnType;

    enum {
      IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign),
      PacketAccess = (internal::packet_traits<Scalar>::size > 1),
      Layout = Options_ & RowMajor ? RowMajor : ColMajor,
      CoordAccess = true
    };

    static const int Options = Options_;
    static const std::size_t NumIndices = NumIndices_;
    typedef DSizes<Index, NumIndices_> Dimensions;

  protected:
    TensorStorage<Scalar, Dimensions, Options> m_storage;

  public:
    // Metadata
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }

    // These accessors make EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    // work, because that macro uses base().coeffRef() - and we don't yet
    // implement a similar class hierarchy.
    inline Self& base()             { return *this; }
    inline const Self& base() const { return *this; }

#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

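    // Illustrative note (not part of the original header): coeff() and
    // coeffRef() validate their arguments only via eigen_internal_assert,
    // which is compiled out unless EIGEN_INTERNAL_DEBUGGING is defined,
    // whereas the operator() overloads below check ranges via eigen_assert.
    // A minimal sketch of the difference, assuming a rank-2 float tensor:
    //
    //   Eigen::Tensor<float, 2> t(2, 3);
    //   t.coeffRef(1, 2) = 1.0f;   // unchecked fast path in release builds
    //   t(1, 2)          = 1.0f;   // range-checked while assertions are enabled
    //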
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
    {
      return coeff(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
    {
      return coeff(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
    {
      return coeff(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
    {
      return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      eigen_assert(checkIndexRange(indices));
      return coeff(indices);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return coeff(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
    {
      // The bracket operator is only for vectors; use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff(index);
    }

#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
    {
      return coeffRef(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
    {
      return coeffRef(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
    {
      return coeffRef(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
    {
      return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
    {
      eigen_assert(checkIndexRange(indices));
      return coeffRef(indices);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      // The bracket operator is only for vectors; use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(index);
    }

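    // Illustrative sketch (not part of the original header) of the element
    // access API above, assuming a rank-3 float tensor:
    //
    //   Eigen::Tensor<float, 3> t(2, 3, 4);  // the index count must equal the rank
    //   t(1, 2, 3) = 42.0f;                  // write through Scalar&
    //   float v = t(1, 2, 3);                // read through const Scalar&
    //
    //   Eigen::Tensor<float, 1> vec(5);
    //   vec[2] = 1.0f;                       // operator[] is for rank-1 tensors only
    //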
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor()
      : m_storage()
    {
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const Self& other)
      : m_storage(other.m_storage)
    {
    }

#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Tensor(Index firstDimension, IndexTypes... otherDimensions)
      : m_storage(internal::array_prod(array<Index, NumIndices>{{firstDimension, otherDimensions...}}), array<Index, NumIndices>{{firstDimension, otherDimensions...}})
    {
      // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#else
    inline explicit Tensor(Index dim1)
      : m_storage(dim1, array<Index, 1>(dim1))
    {
      EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    inline explicit Tensor(Index dim1, Index dim2)
      : m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
    {
      EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    inline explicit Tensor(Index dim1, Index dim2, Index dim3)
      : m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
    {
      EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    inline explicit Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
      : m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
    {
      EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    inline explicit Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
      : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
    {
      EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#endif

    inline explicit Tensor(const array<Index, NumIndices>& dimensions)
      : m_storage(internal::array_prod(dimensions), dimensions)
    {
      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    }

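    // Illustrative sketch (not part of the original header): constructing
    // tensors from explicit dimensions. The coefficients are left
    // uninitialized unless an EIGEN_INITIALIZE_MATRICES_BY_* option is
    // defined, which is what EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    // checks for:
    //
    //   Eigen::Tensor<double, 2> a(3, 4);             // 3x4, 12 coefficients
    //   Eigen::array<Eigen::DenseIndex, 2> d{{3, 4}}; // DenseIndex matches the default Index
    //   Eigen::Tensor<double, 2> b(d);                // same shape, via an index array
    //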
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
    {
      typedef TensorAssignOp<Tensor, const Tensor> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

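    // Illustrative sketch (not part of the original header): assignment from
    // a tensor expression first resizes *this to the evaluated expression's
    // dimensions, then runs the assignment through TensorExecutor:
    //
    //   Eigen::Tensor<float, 2> a(2, 3), b(2, 3), c;
    //   a.setRandom();
    //   b.setRandom();   // setRandom() is provided by TensorBase
    //   c = a + b;       // c is resized to 2x3, then filled coefficient-wise
    //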
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes> EIGEN_DEVICE_FUNC
    void resize(Index firstDimension, IndexTypes... otherDimensions)
    {
      // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
    }
#endif

    EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
    {
      Index size = Index(1);
      for (std::size_t i = 0; i < NumIndices; i++) {
        internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
        size *= dimensions[i];
      }
      #ifdef EIGEN_INITIALIZE_COEFFS
        bool size_changed = size != this->size();
        m_storage.resize(size, dimensions);
        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
      #else
        m_storage.resize(size, dimensions);
      #endif
    }

    EIGEN_DEVICE_FUNC void resize(const DSizes<Index, NumIndices>& dimensions) {
      array<Index, NumIndices> dims;
      for (std::size_t i = 0; i < NumIndices; ++i) {
        dims[i] = dimensions[i];
      }
      resize(dims);
    }

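    // Illustrative sketch (not part of the original header): resize()
    // recomputes the total size (with overflow checks) and forwards it to
    // TensorStorage; existing coefficient values should not be assumed to
    // survive a resize:
    //
    //   Eigen::Tensor<int, 3> t(2, 3, 4);
    //   t.resize(4, 3, 2);   // same rank, still 24 coefficients
    //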
#ifndef EIGEN_EMULATE_CXX11_META_H
    template <std::ptrdiff_t... Indices>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<Indices...>& dimensions) {
      array<Index, NumIndices> dims;
      for (std::size_t i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#else
    template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions) {
      array<Index, NumIndices> dims;
      for (std::size_t i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#endif

  protected:

    bool checkIndexRange(const array<Index, NumIndices>& indices) const
    {
      using internal::array_apply_and_reduce;
      using internal::array_zip_and_reduce;
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;

      return
        // check whether the indices are all >= 0
        array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
        // check whether the indices fit in the dimensions
        array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
    {
      if (Options&RowMajor) {
        return m_storage.dimensions().IndexOfRowMajor(indices);
      } else {
        return m_storage.dimensions().IndexOfColMajor(indices);
      }
    }
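
    // Illustrative note (not part of the original header): for a tensor with
    // dimensions 2x3x4, indices (i, j, k) linearize as
    //   column-major: i + 2*j + (2*3)*k   (first index varies fastest)
    //   row-major:    k + 4*j + (4*3)*i   (last index varies fastest)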
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_H