TensorImagePatch.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
#define EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H

namespace Eigen {

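/** \class TensorImagePatch
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Patch extraction specialized for image processing.
  *
  * This assumes that the input has at least 3 dimensions ordered as follows
  * (for ColMajor):
  *  1st dimension: channels (of size d)
  *  2nd dimension: rows (of size r)
  *  3rd dimension: columns (of size c)
  * There can be additional dimensions such as time (for video) or batch (for
  * bulk processing) after the first 3.
  * Calling the image patch code with patch_size_rows and patch_size_cols is
  * equivalent to calling a regular convolution with filters of size
  * patch_size_rows x patch_size_cols.
  */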
namespace internal {
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct traits<TensorImagePatchOp<Rows, Cols, XprType> > : public traits<XprType>
{
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions + 1;
  static const int Layout = XprTraits::Layout;
};

template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct eval<TensorImagePatchOp<Rows, Cols, XprType>, Eigen::Dense>
{
  typedef const TensorImagePatchOp<Rows, Cols, XprType>& type;
};

template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct nested<TensorImagePatchOp<Rows, Cols, XprType>, 1, typename eval<TensorImagePatchOp<Rows, Cols, XprType> >::type>
{
  typedef TensorImagePatchOp<Rows, Cols, XprType> type;
};

} // end namespace internal

template<DenseIndex Rows, DenseIndex Cols, typename XprType>
class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::Scalar Scalar;
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::Packet Packet;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketReturnType PacketReturnType;
  typedef typename Eigen::internal::nested<TensorImagePatchOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::Index Index;

  // Constructor for implicit padding: the amount of padding is derived from
  // the padding type (PADDING_VALID or PADDING_SAME).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
                                                           DenseIndex row_strides, DenseIndex col_strides,
                                                           DenseIndex in_row_strides, DenseIndex in_col_strides,
                                                           DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
                                                           PaddingType padding_type, Scalar padding_value)
      : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
        m_row_strides(row_strides), m_col_strides(col_strides),
        m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
        m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
        m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
        m_padding_type(padding_type), m_padding_value(padding_value) {}

  // Constructor for explicit padding: the caller supplies the exact border
  // sizes, so the padding type is fixed to PADDING_VALID.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
                                                           DenseIndex row_strides, DenseIndex col_strides,
                                                           DenseIndex in_row_strides, DenseIndex in_col_strides,
                                                           DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
                                                           DenseIndex padding_top, DenseIndex padding_bottom,
                                                           DenseIndex padding_left, DenseIndex padding_right,
                                                           Scalar padding_value)
      : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
        m_row_strides(row_strides), m_col_strides(col_strides),
        m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
        m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
        m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
        m_padding_left(padding_left), m_padding_right(padding_right),
        m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}

  EIGEN_DEVICE_FUNC
  DenseIndex patch_rows() const { return m_patch_rows; }
  EIGEN_DEVICE_FUNC
  DenseIndex patch_cols() const { return m_patch_cols; }
  EIGEN_DEVICE_FUNC
  DenseIndex row_strides() const { return m_row_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex col_strides() const { return m_col_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex in_row_strides() const { return m_in_row_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex in_col_strides() const { return m_in_col_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex row_inflate_strides() const { return m_row_inflate_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex col_inflate_strides() const { return m_col_inflate_strides; }
  EIGEN_DEVICE_FUNC
  bool padding_explicit() const { return m_padding_explicit; }
  EIGEN_DEVICE_FUNC
  DenseIndex padding_top() const { return m_padding_top; }
  EIGEN_DEVICE_FUNC
  DenseIndex padding_bottom() const { return m_padding_bottom; }
  EIGEN_DEVICE_FUNC
  DenseIndex padding_left() const { return m_padding_left; }
  EIGEN_DEVICE_FUNC
  DenseIndex padding_right() const { return m_padding_right; }
  EIGEN_DEVICE_FUNC
  PaddingType padding_type() const { return m_padding_type; }
  EIGEN_DEVICE_FUNC
  Scalar padding_value() const { return m_padding_value; }

  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename XprType::Nested>::type&
  expression() const { return m_xpr; }

  protected:
  typename XprType::Nested m_xpr;
  const DenseIndex m_patch_rows;
  const DenseIndex m_patch_cols;
  const DenseIndex m_row_strides;
  const DenseIndex m_col_strides;
  const DenseIndex m_in_row_strides;
  const DenseIndex m_in_col_strides;
  const DenseIndex m_row_inflate_strides;
  const DenseIndex m_col_inflate_strides;
  const bool m_padding_explicit;
  const DenseIndex m_padding_top;
  const DenseIndex m_padding_bottom;
  const DenseIndex m_padding_left;
  const DenseIndex m_padding_right;
  const PaddingType m_padding_type;
  const Scalar m_padding_value;
};
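
// Usage sketch (not part of the original file): patches are normally
// extracted through TensorBase::extract_image_patches() rather than by
// instantiating TensorImagePatchOp directly. Assuming a ColMajor input of
// shape (depth, rows, cols, batch):
//
//   Eigen::Tensor<float, 4> input(2, 5, 7, 10);
//   input.setRandom();
//   // 3x3 patches with stride 1 and SAME padding (the defaults):
//   Eigen::Tensor<float, 5> patches = input.extract_image_patches(3, 3);
//   // patches has shape (2, 3, 3, 5*7, 10): depth, patch_rows, patch_cols,
//   // one entry per patch position, batch.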

// Eval as rvalue
template<DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
{
  typedef TensorImagePatchOp<Rows, Cols, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumDims = NumInputDims + 1;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>,
                          Device> Self;
  typedef TensorEvaluator<ArgType, Device> Impl;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = NumDims == 5,
  };
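
  // Coordinate-based access (the coeff(array<Index, NumDims>) overload below)
  // is only available in the common 5D case: depth, patch_rows, patch_cols,
  // number of patches, batch.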

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device)
  {
    EIGEN_STATIC_ASSERT(NumDims >= 4, YOU_MADE_A_PROGRAMMING_MISTAKE);

    m_paddingValue = op.padding_value();

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    // Caches a few variables.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputDepth = input_dims[0];
      m_inputRows = input_dims[1];
      m_inputCols = input_dims[2];
    } else {
      m_inputDepth = input_dims[NumInputDims-1];
      m_inputRows = input_dims[NumInputDims-2];
      m_inputCols = input_dims[NumInputDims-3];
    }

    m_row_strides = op.row_strides();
    m_col_strides = op.col_strides();

    // Input strides and effective input/patch size
    m_in_row_strides = op.in_row_strides();
    m_in_col_strides = op.in_col_strides();
    m_row_inflate_strides = op.row_inflate_strides();
    m_col_inflate_strides = op.col_inflate_strides();
    // The "effective" input rows and input cols are the input rows and cols
    // after inflating them with zeros.
    // For example, a 2x3 matrix with row_inflate_strides and
    // col_inflate_strides of 2:
    //
    //   A B C
    //   D E F
    //
    // is inflated to the 3x5 matrix:
    //
    //   A . B . C
    //   . . . . .
    //   D . E . F

    m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
    m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
    m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
    m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);
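    // E.g. the inflated 2x3 example above gives m_input_rows_eff =
    // (2-1)*2 + 1 = 3 and m_input_cols_eff = (3-1)*2 + 1 = 5. Similarly, a
    // 3x3 patch with in_row_strides of 2 covers 3 + (3-1)*(2-1) = 5 effective
    // input rows (as in a dilated convolution).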

    if (op.padding_explicit()) {
      m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
      m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
      m_rowPaddingTop = op.padding_top();
      m_colPaddingLeft = op.padding_left();
    } else {
      // Computing padding from the type
      switch (op.padding_type()) {
        case PADDING_VALID:
          m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
          // Calculate the padding
          m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2;
          m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2;
          break;
        case PADDING_SAME:
          m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
          // Calculate the padding
          m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2;
          m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2;
          break;
        default:
          eigen_assert(false && "unexpected padding");
      }
    }
    eigen_assert(m_outputRows > 0);
    eigen_assert(m_outputCols > 0);
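
    // Worked example (hypothetical numbers): a 5x5 effective input with a 3x3
    // effective patch and strides of 2 yields
    //   PADDING_VALID: outputRows = ceil((5 - 3 + 1) / 2) = 2, no padding;
    //   PADDING_SAME:  outputRows = ceil(5 / 2) = 3, and
    //                  rowPaddingTop = ((3 - 1) * 2 + 3 - 5) / 2 = 1.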

    // Dimensions for result of extraction.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // ColMajor
      // 0: depth
      // 1: patch_rows
      // 2: patch_cols
      // 3: number of patches
      // 4 and beyond: anything else (such as batch).
      m_dimensions[0] = input_dims[0];
      m_dimensions[1] = op.patch_rows();
      m_dimensions[2] = op.patch_cols();
      m_dimensions[3] = m_outputRows * m_outputCols;
      for (int i = 4; i < NumDims; ++i) {
        m_dimensions[i] = input_dims[i-1];
      }
    } else {
      // RowMajor
      // NumDims-1: depth
      // NumDims-2: patch_rows
      // NumDims-3: patch_cols
      // NumDims-4: number of patches
      // NumDims-5 and beyond: anything else (such as batch).
      m_dimensions[NumDims-1] = input_dims[NumInputDims-1];
      m_dimensions[NumDims-2] = op.patch_rows();
      m_dimensions[NumDims-3] = op.patch_cols();
      m_dimensions[NumDims-4] = m_outputRows * m_outputCols;
      for (int i = NumDims-5; i >= 0; --i) {
        m_dimensions[i] = input_dims[i];
      }
    }

    // Strides for moving the patch in various dimensions.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_colStride = m_dimensions[1];
      m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0];
      m_otherStride = m_patchStride * m_dimensions[3];
    } else {
      m_colStride = m_dimensions[NumDims-2];
      m_patchStride = m_colStride * m_dimensions[NumDims-3] * m_dimensions[NumDims-1];
      m_otherStride = m_patchStride * m_dimensions[NumDims-4];
    }

    // Strides for navigating through the input tensor.
    m_rowInputStride = m_inputDepth;
    m_colInputStride = m_inputDepth * m_inputRows;
    m_patchInputStride = m_inputDepth * m_inputRows * m_inputCols;
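    // These strides are valid for both layouts: the dimension order and the
    // storage order reverse together, so the physical strides are unchanged.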

    // Fast representations of different variables.
    m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
    m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
    m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
    m_fastInputRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
    m_fastInputColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
    m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);
    // Fast divisors for the output dimensions.
    m_fastOutputRows = internal::TensorIntDivisor<Index>(m_outputRows);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
    } else {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
    }
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketReturnType PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    // Patch index corresponding to the passed in index.
    const Index patchIndex = index / m_fastPatchStride;
    // Find the offset of the element wrt the location of the first element.
    const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;

    // Other ways to index this element.
    const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride;
    const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;
    // Calculate col index in the original input tensor.
    const Index colIndex = patch2DIndex / m_fastOutputRows;
    const Index colOffset = patchOffset / m_fastColStride;
    const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
    const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInputColStride) : 0);
    if (inputCol < 0 || inputCol >= m_input_cols_eff ||
        ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    // Calculate row index in the original input tensor.
    const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
    const Index rowOffset = patchOffset - colOffset * m_colStride;
    const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
    const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInputRowStride) : 0);
    if (inputRow < 0 || inputRow >= m_input_rows_eff ||
        ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
    const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];

    const Index inputIndex = depth + origInputRow * m_rowInputStride + origInputCol * m_colInputStride + otherIndex * m_patchInputStride;
    return m_impl.coeff(inputIndex);
  }
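
  // Index decomposition example (ColMajor, hypothetical sizes): with depth 3,
  // 2x2 patches and a 4x5 patch grid, m_patchStride = 3*2*2 = 12 and
  // m_otherStride = 12*20 = 240. Index 250 then decomposes as otherIndex = 1
  // (second batch), patch2DIndex = (250 - 240) / 12 = 0 (first patch),
  // patchOffset = 10 / 3 = 3 (i.e. rowOffset = 1, colOffset = 1), and
  // depth = 1.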

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const Index packetSize = internal::unpacket_traits<PacketReturnType>::size;
    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+packetSize-1 < dimensions().TotalSize());

    if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1) {
      return packetWithPossibleZero(index);
    }

    const Index indices[2] = {index, index + packetSize - 1};
    const Index patchIndex = indices[0] / m_fastPatchStride;
    if (patchIndex != indices[1] / m_fastPatchStride) {
      return packetWithPossibleZero(index);
    }
    const Index otherIndex = (NumDims == 4) ? 0 : indices[0] / m_fastOtherStride;
    eigen_assert(otherIndex == indices[1] / m_fastOtherStride);

    // Find the offset of the element wrt the location of the first element.
    const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
                                   (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};

    const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
    eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);

    const Index colIndex = patch2DIndex / m_fastOutputRows;
    const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride};

    // Calculate col indices in the original input tensor.
    const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] -
      m_colPaddingLeft, colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
    if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputCols[0] == inputCols[1]) {
      const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
      const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride};
      eigen_assert(rowOffsets[0] <= rowOffsets[1]);
      // Calculate row indices in the original input tensor.
      const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] -
        m_rowPaddingTop, rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};

      if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
        return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
      }

      if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) {
        // no padding
        const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
        const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
        const Index inputIndex = depth + inputRows[0] * m_rowInputStride + inputCols[0] * m_colInputStride + otherIndex * m_patchInputStride;
        return m_impl.template packet<Unaligned>(inputIndex);
      }
    }

    return packetWithPossibleZero(index);
  }

  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }

  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }

  Index rowPaddingTop() const { return m_rowPaddingTop; }
  Index colPaddingLeft() const { return m_colPaddingLeft; }
  Index outputRows() const { return m_outputRows; }
  Index outputCols() const { return m_outputCols; }
  Index userRowStride() const { return m_row_strides; }
  Index userColStride() const { return m_col_strides; }
  Index userInRowStride() const { return m_in_row_strides; }
  Index userInColStride() const { return m_in_col_strides; }
  Index rowInflateStride() const { return m_row_inflate_strides; }
  Index colInflateStride() const { return m_col_inflate_strides; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<Index, NumDims>& coords) const
  {
    // Location of the first element of the patch.
    // ColMajor
    // 0: d, 1: patch_rows, 2: patch_cols, 3: number of patches, 4: number of batches
    // RowMajor
    // 0: number of batches, 1: number of patches, 2: patch_cols, 3: patch_rows, 4: d
    const Index patch2DIndex = coords[static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 3 : 1];

    array<Index, NumDims-1> inputCoords;
    Index input_col_idx = patch2DIndex / m_fastInputColsEff;
    Index inputCol = input_col_idx + coords[1] * m_in_row_strides - m_rowPaddingTop;
    Index inputRow = patch2DIndex - input_col_idx * m_input_cols_eff + coords[2] * m_in_col_strides - m_colPaddingLeft;
    const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInputColStride) : 0);
    const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInputRowStride) : 0);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      inputCoords[0] = coords[0];  // depth
      inputCoords[1] = origInputCol;
      inputCoords[2] = origInputRow;
      inputCoords[3] = coords[4];  // batch
    } else {
      inputCoords[3] = coords[4];  // depth
      inputCoords[2] = origInputCol;
      inputCoords[1] = origInputRow;
      inputCoords[0] = coords[0];  // batch
    }
    // If the computed coordinates are outside the original image perimeter,
    // return the padding value.
    if (inputCol < 0 || inputCol >= m_input_cols_eff || inputRow < 0 || inputRow >= m_input_rows_eff ||
        ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides)) ||
        ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
      return Scalar(m_paddingValue);
    }
    if (TensorEvaluator<ArgType, Device>::CoordAccess) {
      return m_impl.coeff(inputCoords);
    } else {
      Index inputIndex;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        inputIndex =
          inputCoords[3] * m_patchInputStride +
          inputCoords[2] * m_colInputStride +
          inputCoords[1] * m_rowInputStride +
          inputCoords[0];
      } else {
        // Mirror of the ColMajor case with the coordinate order reversed.
        // Note that inputCoords only has NumDims-1 (= 4) entries, so the
        // indices must run 0..3 here as well.
        inputIndex =
          inputCoords[0] * m_patchInputStride +
          inputCoords[1] * m_colInputStride +
          inputCoords[2] * m_rowInputStride +
          inputCoords[3];
      }
      return m_impl.coeff(inputIndex);
    }
  }

  protected:
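  // Scalar fallback for packet(): gather the packet one coefficient at a
  // time so that out-of-image elements pick up the padding value.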
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
    for (int i = 0; i < packetSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  Dimensions m_dimensions;

  Index m_otherStride;
  Index m_patchStride;
  Index m_colStride;
  Index m_row_strides;
  Index m_col_strides;

  Index m_in_row_strides;
  Index m_in_col_strides;
  Index m_row_inflate_strides;
  Index m_col_inflate_strides;

  Index m_input_rows_eff;
  Index m_input_cols_eff;
  Index m_patch_rows_eff;
  Index m_patch_cols_eff;

  internal::TensorIntDivisor<Index> m_fastOtherStride;
  internal::TensorIntDivisor<Index> m_fastPatchStride;
  internal::TensorIntDivisor<Index> m_fastColStride;
  internal::TensorIntDivisor<Index> m_fastInputRowStride;
  internal::TensorIntDivisor<Index> m_fastInputColStride;
  internal::TensorIntDivisor<Index> m_fastInputColsEff;

  Index m_rowInputStride;
  Index m_colInputStride;
  Index m_patchInputStride;

  Index m_inputDepth;
  Index m_inputRows;
  Index m_inputCols;

  Index m_outputRows;
  Index m_outputCols;

  Index m_rowPaddingTop;
  Index m_colPaddingLeft;

  internal::TensorIntDivisor<Index> m_fastOutputRows;
  internal::TensorIntDivisor<Index> m_fastOutputDepth;

  Scalar m_paddingValue;

  TensorEvaluator<ArgType, Device> m_impl;
};


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H