#ifndef EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H
#define EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H

namespace Eigen {

/** \class TensorReshapingOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor reshaping class.
  */
namespace internal {
template<typename NewDimensions, typename XprType>
struct traits<TensorReshapingOp<NewDimensions, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = array_size<NewDimensions>::value;
  static const int Layout = XprTraits::Layout;
};
template<typename NewDimensions, typename XprType>
struct eval<TensorReshapingOp<NewDimensions, XprType>, Eigen::Dense>
{
  typedef const TensorReshapingOp<NewDimensions, XprType>& type;
};
template<typename NewDimensions, typename XprType>
struct nested<TensorReshapingOp<NewDimensions, XprType>, 1, typename eval<TensorReshapingOp<NewDimensions, XprType> >::type>
{
  typedef TensorReshapingOp<NewDimensions, XprType> type;
};

}  // end namespace internal
template<typename NewDimensions, typename XprType>
class TensorReshapingOp : public TensorBase<TensorReshapingOp<NewDimensions, XprType>, WriteAccessors>
{
  public:
    typedef typename Eigen::internal::traits<TensorReshapingOp>::Scalar Scalar;
    typedef typename Eigen::internal::traits<TensorReshapingOp>::Packet Packet;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
    typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;
    typedef typename Eigen::internal::nested<TensorReshapingOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorReshapingOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorReshapingOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReshapingOp(const XprType& expr, const NewDimensions& dims)
        : m_xpr(expr), m_dims(dims) {}
    EIGEN_DEVICE_FUNC
    const NewDimensions& dimensions() const { return m_dims; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const TensorReshapingOp& other)
    {
      typedef TensorAssignOp<TensorReshapingOp, const TensorReshapingOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorReshapingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
  protected:
    typename XprType::Nested m_xpr;
    const NewDimensions m_dims;
};
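
// Illustrative usage (a hedged sketch, not part of this header; the tensor
// names below are hypothetical): reshaping only reinterprets the flat element
// order, so a 2x6 tensor can be viewed as a 4x3 tensor without copying data.
//
//   Eigen::Tensor<float, 2> input(2, 6);
//   input.setRandom();
//   Eigen::array<Eigen::DenseIndex, 2> new_shape{{4, 3}};
//   Eigen::Tensor<float, 2> output = input.reshape(new_shape);
//   // input and output hold the same 12 coefficients in the same flat order.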

// Eval as rvalue
template<typename NewDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
{
  typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
  typedef NewDimensions Dimensions;

  enum {
    IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_dimensions(op.dimensions())
  {
    // The total number of coefficients must not change when reshaping.
    eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions()));
  }
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketReturnType PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
    return m_impl.evalSubExprsIfNeeded(data);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(index);
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_impl.template packet<LoadMode>(index);
  }

  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_impl.data(); }
  EIGEN_DEVICE_FUNC const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
  NewDimensions m_dimensions;
};

// Eval as lvalue
template<typename NewDimensions, typename ArgType, typename Device>
struct TensorEvaluator<TensorReshapingOp<NewDimensions, ArgType>, Device>
  : public TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device> Base;
  typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
  typedef NewDimensions Dimensions;

  enum {
    IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : Base(op, device)
  { }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketReturnType PacketReturnType;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(index);
  }

  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    this->m_impl.template writePacket<StoreMode>(index, x);
  }
};

/** \class TensorSlicingOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor slicing class.
  */
namespace internal {
template<typename StartIndices, typename Sizes, typename XprType>
struct traits<TensorSlicingOp<StartIndices, Sizes, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = array_size<StartIndices>::value;
  static const int Layout = XprTraits::Layout;
};
template<typename StartIndices, typename Sizes, typename XprType>
struct eval<TensorSlicingOp<StartIndices, Sizes, XprType>, Eigen::Dense>
{
  typedef const TensorSlicingOp<StartIndices, Sizes, XprType>& type;
};
template<typename StartIndices, typename Sizes, typename XprType>
struct nested<TensorSlicingOp<StartIndices, Sizes, XprType>, 1, typename eval<TensorSlicingOp<StartIndices, Sizes, XprType> >::type>
{
  typedef TensorSlicingOp<StartIndices, Sizes, XprType> type;
};

}  // end namespace internal
template<typename StartIndices, typename Sizes, typename XprType>
class TensorSlicingOp : public TensorBase<TensorSlicingOp<StartIndices, Sizes, XprType> >
{
  public:
    typedef typename Eigen::internal::traits<TensorSlicingOp>::Scalar Scalar;
    typedef typename Eigen::internal::traits<TensorSlicingOp>::Packet Packet;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename XprType::PacketReturnType PacketReturnType;
    typedef typename Eigen::internal::nested<TensorSlicingOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorSlicingOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorSlicingOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSlicingOp(const XprType& expr, const StartIndices& indices, const Sizes& sizes)
        : m_xpr(expr), m_indices(indices), m_sizes(sizes) {}
    EIGEN_DEVICE_FUNC
    const StartIndices& startIndices() const { return m_indices; }
    EIGEN_DEVICE_FUNC
    const Sizes& sizes() const { return m_sizes; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorSlicingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const TensorSlicingOp& other)
    {
      typedef TensorAssignOp<TensorSlicingOp, const TensorSlicingOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
  protected:
    typename XprType::Nested m_xpr;
    const StartIndices m_indices;
    const Sizes m_sizes;
};
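
// Illustrative usage (a sketch; the tensor names are hypothetical): a slice
// selects a rectangular block given per-dimension start offsets and extents.
//
//   Eigen::Tensor<float, 2> input(5, 7);
//   input.setRandom();
//   Eigen::array<Eigen::DenseIndex, 2> offsets{{1, 2}};
//   Eigen::array<Eigen::DenseIndex, 2> extents{{3, 4}};
//   Eigen::Tensor<float, 2> block = input.slice(offsets, extents);
//   // block(i, j) == input(1 + i, 2 + j) for 0 <= i < 3, 0 <= j < 4.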

namespace {
// Helper that decides whether a slice is large enough for a device memcpy to
// beat coefficient-by-coefficient evaluation.
// Fixme: figure out the exact threshold.
template <typename Index, typename Device> struct MemcpyTriggerForSlicing {
  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const Device& device) : threshold_(2 * device.numThreads()) { }
  EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > threshold_; }

 private:
  Index threshold_;
};

// It is very expensive to start the memcpy kernel on GPU: we therefore only
// use it for large copies.
template <typename Index> struct MemcpyTriggerForSlicing<Index, GpuDevice> {
  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const GpuDevice&) { }
  EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > 4*1024*1024; }
};
}  // namespace
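
// For intuition (a sketch, not a library contract): on a CPU device with 8
// threads the trigger fires once a contiguous run exceeds 16 values, while on
// GPU the run must exceed 4*1024*1024 values before launching a memcpy kernel
// is considered worthwhile. Assuming the device exposes numThreads():
//
//   MemcpyTriggerForSlicing<std::ptrdiff_t, DefaultDevice> trigger((DefaultDevice()));
//   bool use_memcpy = trigger(1024);  // true iff 1024 > 2 * numThreads()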

// Eval as rvalue
template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
{
  typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
  static const int NumDims = internal::array_size<Sizes>::value;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets and sizes.
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = TensorEvaluator<ArgType, Device>::CoordAccess,
  };
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_device(device), m_dimensions(op.sizes()), m_offsets(op.startIndices())
  {
    // Every slice must fit inside the input tensor.
    for (std::size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
      eigen_assert(m_impl.dimensions()[i] >= op.sizes()[i] + op.startIndices()[i]);
    }

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    const Sizes& output_dims = op.sizes();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
      }

      // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed.
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
      }
    } else {
      m_inputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
      }

      // Don't initialize m_fastOutputStrides[NumDims-1] since it won't ever be accessed.
      m_outputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
      }
    }
  }
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketReturnType PacketReturnType;
  typedef Sizes Dimensions;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
    m_impl.evalSubExprsIfNeeded(NULL);
    if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization && data && m_impl.data()) {
      // Count how many leading (in memory order) dimensions the slice keeps
      // whole: that run of values is contiguous in the input buffer.
      Index contiguous_values = 1;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        for (int i = 0; i < NumDims; ++i) {
          contiguous_values *= dimensions()[i];
          if (dimensions()[i] != m_impl.dimensions()[i]) {
            break;
          }
        }
      } else {
        for (int i = NumDims-1; i >= 0; --i) {
          contiguous_values *= dimensions()[i];
          if (dimensions()[i] != m_impl.dimensions()[i]) {
            break;
          }
        }
      }
      // Use memcpy if it's going to be faster than using the regular evaluation.
      const MemcpyTriggerForSlicing<Index, Device> trigger(m_device);
      if (trigger(contiguous_values)) {
        Scalar* src = (Scalar*)m_impl.data();
        for (int i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) {
          Index offset = srcCoeff(i);
          m_device.memcpy((void*)(data+i), src+offset, contiguous_values * sizeof(Scalar));
        }
        return false;
      }
    }
    return true;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+packetSize-1 < internal::array_prod(dimensions()));

    // Map the first and last coefficient of the packet back to input indices.
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + packetSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / m_fastOutputStrides[i];
        const Index idx1 = indices[1] / m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
        inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + m_offsets[0]);
      inputIndices[1] += (indices[1] + m_offsets[0]);
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / m_fastOutputStrides[i];
        const Index idx1 = indices[1] / m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
        inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + m_offsets[NumDims-1]);
      inputIndices[1] += (indices[1] + m_offsets[NumDims-1]);
    }
    if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
      // The packet is contiguous in the input: load it in one shot.
      PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
      return rslt;
    }
    else {
      // Gather the coefficients one by one and assemble the packet.
      typename internal::remove_const<CoeffReturnType>::type values[packetSize];
      values[0] = m_impl.coeff(inputIndices[0]);
      values[packetSize-1] = m_impl.coeff(inputIndices[1]);
      for (int i = 1; i < packetSize-1; ++i) {
        values[i] = coeff(index+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<Index, NumDims>& coords)
  {
    array<Index, NumDims> inputCoords;
    for (int i = 0; i < NumDims; ++i) {
      inputCoords[i] = coords[i] + this->m_offsets[i];
    }
    return m_impl.coeff(inputCoords);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const {
    CoeffReturnType* result = m_impl.data();
    if (result) {
      Index offset = 0;
      // The slice can only expose a raw pointer if, past the first dimension
      // that was actually narrowed, every remaining dimension has size 1;
      // otherwise the slice isn't contiguous in memory and we return NULL.
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        for (int i = 0; i < NumDims; ++i) {
          if (m_dimensions[i] != m_impl.dimensions()[i]) {
            offset += m_offsets[i] * m_inputStrides[i];
            for (int j = i+1; j < NumDims; ++j) {
              if (m_dimensions[j] > 1) {
                return NULL;
              }
              offset += m_offsets[j] * m_inputStrides[j];
            }
            break;
          }
        }
      } else {
        for (int i = NumDims - 1; i >= 0; --i) {
          if (m_dimensions[i] != m_impl.dimensions()[i]) {
            offset += m_offsets[i] * m_inputStrides[i];
            for (int j = i-1; j >= 0; --j) {
              if (m_dimensions[j] > 1) {
                return NULL;
              }
              offset += m_offsets[j] * m_inputStrides[j];
            }
            break;
          }
        }
      }
      return result + offset;
    }
    return NULL;
  }
 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    // Convert a flat index into the slice to a flat index into the input by
    // peeling off one output coordinate per dimension and adding the offsets.
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += (index + m_offsets[0]);
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += (index + m_offsets[NumDims-1]);
    }
    return inputIndex;
  }
  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  const Device& m_device;
  Dimensions m_dimensions;
  const StartIndices m_offsets;
};
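
// Worked example of the srcCoeff mapping above (a sketch with made-up
// numbers): take a column-major 5x7 input and the slice with offsets (1, 2)
// and extents (3, 4). Then m_inputStrides = {1, 5} and m_outputStrides =
// {1, 3}. For the flat slice index 7: idx = 7 / 3 = 2 peels off the column,
// contributing (2 + 2) * 5 = 20, and the remainder 7 - 2*3 = 1 plus the row
// offset contributes 1 + 1, so srcCoeff(7) = 22. Indeed, slice coordinate
// (1, 2) is input coordinate (2, 4), and 2 + 4*5 = 22.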

// Eval as lvalue
template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
  : public TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device> Base;
  typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
  static const int NumDims = internal::array_size<Sizes>::value;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = TensorEvaluator<ArgType, Device>::CoordAccess,
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : Base(op, device)
  { }
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketReturnType PacketReturnType;
  typedef Sizes Dimensions;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + packetSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + this->m_offsets[0]);
      inputIndices[1] += (indices[1] + this->m_offsets[0]);
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]);
      inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]);
    }
    if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
      // The packet maps to a contiguous range in the input: store it directly.
      this->m_impl.template writePacket<StoreMode>(inputIndices[0], x);
    }
    else {
      // Scatter the packet coefficient by coefficient.
      EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
      internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
      this->m_impl.coeffRef(inputIndices[0]) = values[0];
      this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1];
      for (int i = 1; i < packetSize-1; ++i) {
        this->coeffRef(index+i) = values[i];
      }
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(const array<Index, NumDims>& coords)
  {
    array<Index, NumDims> inputCoords;
    for (int i = 0; i < NumDims; ++i) {
      inputCoords[i] = coords[i] + this->m_offsets[i];
    }
    return this->m_impl.coeffRef(inputCoords);
  }
};
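
// Illustrative usage of the lvalue path (a sketch; names are hypothetical):
// assigning to a slice writes through srcCoeff into the underlying tensor.
//
//   Eigen::Tensor<float, 2> canvas(5, 7);
//   canvas.setZero();
//   Eigen::Tensor<float, 2> patch(3, 4);
//   patch.setConstant(1.0f);
//   Eigen::array<Eigen::DenseIndex, 2> offsets{{1, 2}};
//   Eigen::array<Eigen::DenseIndex, 2> extents{{3, 4}};
//   canvas.slice(offsets, extents) = patch;  // only the 3x4 block is touched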

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H