SSE/PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef __FMA__
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD 1
#endif
#endif

#if (defined EIGEN_VECTORIZE_AVX) && EIGEN_COMP_GNUC_STRICT && (__GXX_ABI_VERSION < 1004)
// With GCC's default ABI version, __m128 and __m256 are the same type and therefore we cannot
// have overloads for both types without a linking error.
// One solution is to increase the ABI version using -fabi-version=4 (or greater).
// Otherwise, we work around this inconvenience by wrapping 128-bit types into the following helper
// structure:
template<typename T>
struct eigen_packet_wrapper
{
  EIGEN_ALWAYS_INLINE operator T&() { return m_val; }
  EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {}
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {}
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) {
    m_val = v;
    return *this;
  }

  T m_val;
};
typedef eigen_packet_wrapper<__m128> Packet4f;
typedef eigen_packet_wrapper<__m128i> Packet4i;
typedef eigen_packet_wrapper<__m128d> Packet2d;
#else
typedef __m128 Packet4f;
typedef __m128i Packet4i;
typedef __m128d Packet2d;
#endif
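// Either way, Packet4f, Packet4i and Packet2d can be passed directly to the _mm_* intrinsics
// below: the wrapper converts implicitly to and from the underlying __m128/__m128i/__m128d.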

template<> struct is_arithmetic<__m128>  { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };

#define vec4f_swizzle1(v,p,q,r,s) \
  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))

#define vec4i_swizzle1(v,p,q,r,s) \
  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec2d_swizzle1(v,p,q) \
  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))

#define vec4f_swizzle2(a,b,p,q,r,s) \
  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec4i_swizzle2(a,b,p,q,r,s) \
  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
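// Lane i of a swizzle1 result is lane p/q/r/s of the input: for example, vec4f_swizzle1(v,0,0,0,0)
// broadcasts lane 0 and vec4f_swizzle1(v,3,2,1,0) reverses the packet. The swizzle2 variants take
// their two low lanes from `a` (selected by p,q) and their two high lanes from `b` (selected by r,s).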

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
  const Packet2d p2d_##NAME = pset1<Packet2d>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)


// Use the packet_traits defined in AVX/PacketMath.h instead if we're going
// to leverage AVX instructions.
#ifndef EIGEN_VECTORIZE_AVX
template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,
    HasHalfPacket = 0,

    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasLog = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasBlend = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=2,
    HasHalfPacket = 0,

    HasDiv = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasBlend = 1
  };
};
#endif
template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet4i type;
  typedef Packet4i half;
  enum {
    // FIXME check the Has*
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,

    HasBlend = 1
  };
};

template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };

#if EIGEN_COMP_MSVC==1500
// Workaround for an MSVC 9 internal compiler error.
// TODO: it has been detected with win64 builds (amd64), so let's check whether it also happens in 32-bit+SSE mode.
// TODO: check whether a better fix exists, like adding a pset0() function (it crashed on pset1(0)).
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set_ps1(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
#endif

// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
// However, using intrinsics for pset1 makes gcc generate poor code in some cases (see bug 203).
// Using inline assembly is not an option either, because then gcc fails to reorder the instructions properly.
// Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply.
// Also note that with AVX, we want it to generate a vbroadcastss.
#if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__)
template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
  return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
}
#endif

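// plset returns the packet {a, a+1, ..., a+size-1}.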
template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }

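// pnegate flips the sign: for float/double by XORing the IEEE sign bit, for int by subtracting from zero.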
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
  return _mm_xor_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
  return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
}

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
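  // _mm_mul_epu32 multiplies lanes 0 and 2, producing two 64-bit products; a second _mm_mul_epu32
  // on inputs swizzled by (1,0,3,2) covers lanes 1 and 3. The outer swizzles then gather the low
  // 32 bits of each product back into lane order (the low 32 bits are the same for signed inputs).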
  return vec4i_swizzle1(
            vec4i_swizzle2(
              _mm_mul_epu32(a,b),
              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
                            vec4i_swizzle1(b,1,0,3,2)),
              0,2,0,2),
            0,2,1,3);
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by SSE");
  return pset1<Packet4i>(0);
}

// for some weird reasons, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
#ifdef __FMA__
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_min_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_max_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }

#if EIGEN_COMP_MSVC
  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
    EIGEN_DEBUG_UNALIGNED_LOAD
    #if (EIGEN_COMP_MSVC==1600)
    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
    // (i.e., they do not generate an unaligned load!).
    // TODO On most architectures this version should also be faster than a single _mm_loadu_ps,
    // so we could also enable it for MSVC08, but first we have to make sure the latter does not generate bad code when doing so.
    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
    res = _mm_loadh_pi(res, (const __m64*)(from+2));
    return res;
    #else
    return _mm_loadu_ps(from);
    #endif
  }
  template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
  template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from)); }
#else
// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
// require pointer casting to incompatible pointer types and leads to invalid code
// because of the strict aliasing rule. The "dummy" stuff is required to enforce
// a correct instruction dependency.
// TODO: do the same for MSVC (ICC is compatible)
// NOTE: with the code below, MSVC's compiler crashes!
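// The custom paths below assemble a 128-bit register from two 64-bit halves: _mm_load_sd fills the
// low half, _mm_loadh_pd the high half, and the result is bit-cast to the requested packet type.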

#if EIGEN_COMP_GNUC && (EIGEN_ARCH_i386 || (EIGEN_ARCH_x86_64 && EIGEN_GNUC_AT_LEAST(4, 8)))
  // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS  1
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_STORES 1
#elif EIGEN_COMP_CLANG
  // bug 201: Segfaults in __mm_loadh_pd with clang 2.8
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS  1
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_STORES 0
#else
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS  0
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_STORES 0
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_ps(from);
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_ps(res);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_pd(from);
#else
  __m128d res;
  res = _mm_load_sd(from);
  res = _mm_loadh_pd(res,from+1);
  return res;
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_si128(res);
#endif
}
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{ return pset1<Packet2d>(from[0]); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
  Packet4i tmp;
  tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}

template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
  EIGEN_DEBUG_UNALIGNED_STORE
#if EIGEN_AVOID_CUSTOM_UNALIGNED_STORES
  _mm_storeu_pd(to, from);
#else
  _mm_storel_pd((to), from);
  _mm_storeh_pd((to+1), from);
#endif
}
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), Packet2d(_mm_castps_pd(from))); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), Packet2d(_mm_castsi128_pd(from))); }

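// Strided memory access: pgather reads `size` scalars spaced `stride` elements apart into a packet,
// and pscatter writes the packet back out with the same spacing.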
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
  return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
  return _mm_set_pd(from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
  return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  to[stride*0] = _mm_cvtss_f32(from);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  to[stride*0] = _mm_cvtsd_f64(from);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
  to[stride*0] = _mm_cvtsi128_si32(from);
  to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
  to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
  to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
}

// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
}

#ifndef EIGEN_VECTORIZE_AVX
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
#endif

#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif EIGEN_COMP_MSVC_STRICT
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{ return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{ return _mm_shuffle_epi32(a,0x1B); }

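// protate_impl rotates the packet lanes: lane i of the result is lane (i+offset)%size of the input.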
template<size_t offset>
struct protate_impl<offset, Packet4f>
{
  static Packet4f run(const Packet4f& a) {
    return vec4f_swizzle1(a, offset, (offset + 1) % 4, (offset + 2) % 4, (offset + 3) % 4);
  }
};

template<size_t offset>
struct protate_impl<offset, Packet4i>
{
  static Packet4i run(const Packet4i& a) {
    return vec4i_swizzle1(a, offset, (offset + 1) % 4, (offset + 2) % 4, (offset + 3) % 4);
  }
};

template<size_t offset>
struct protate_impl<offset, Packet2d>
{
  static Packet2d run(const Packet2d& a) {
    return vec2d_swizzle1(a, offset, (offset + 1) % 2);
  }
};

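// pabs clears the IEEE sign bit with a bit mask for float/double; for int, the SSE2 fallback uses
// the classic (a ^ (a >> 31)) - (a >> 31) trick.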
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
  #ifdef EIGEN_VECTORIZE_SSSE3
  return _mm_abs_epi32(a);
  #else
  Packet4i aux = _mm_srai_epi32(a,31);
  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
  #endif
}

// with AVX, the default implementations based on pload1 are faster
#ifndef __AVX__
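// pbroadcast4 loads four consecutive scalars a[0..3] and broadcasts each of them into its own packet.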
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet4f>(const float *a,
                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
{
  a3 = pload<Packet4f>(a);
  a0 = vec4f_swizzle1(a3, 0,0,0,0);
  a1 = vec4f_swizzle1(a3, 1,1,1,1);
  a2 = vec4f_swizzle1(a3, 2,2,2,2);
  a3 = vec4f_swizzle1(a3, 3,3,3,3);
}
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet2d>(const double *a,
                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
{
#ifdef EIGEN_VECTORIZE_SSE3
  a0 = _mm_loaddup_pd(a+0);
  a1 = _mm_loaddup_pd(a+1);
  a2 = _mm_loaddup_pd(a+2);
  a3 = _mm_loaddup_pd(a+3);
#else
  a1 = pload<Packet2d>(a);
  a0 = vec2d_swizzle1(a1, 0,0);
  a1 = vec2d_swizzle1(a1, 1,1);
  a3 = pload<Packet2d>(a+2);
  a2 = vec2d_swizzle1(a3, 0,0);
  a3 = vec2d_swizzle1(a3, 1,1);
#endif
}
#endif

EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}

#ifdef EIGEN_VECTORIZE_SSE3
// TODO implement SSE2 versions as well as integer versions
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_hadd_pd(vecs[0], vecs[1]);
}
// SSSE3 version:
// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
// {
//   return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
// }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp0 = _mm_hadd_ps(a,a);
  return pfirst<Packet4f>(_mm_hadd_ps(tmp0, tmp0));
}

template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst<Packet2d>(_mm_hadd_pd(a, a)); }

// SSSE3 version:
// EIGEN_STRONG_INLINE float predux(const Packet4i& a)
// {
//   Packet4i tmp0 = _mm_hadd_epi32(a,a);
//   return pfirst(_mm_hadd_epi32(tmp0, tmp0));
// }
#else
// SSE2 versions
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
}

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  Packet4f tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
  tmp0 = _mm_add_ps(tmp0, tmp1);
  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
  tmp1 = _mm_add_ps(tmp1, tmp2);
  tmp2 = _mm_movehl_ps(tmp1, tmp0);
  tmp0 = _mm_movelh_ps(tmp0, tmp1);
  return _mm_add_ps(tmp0, tmp2);
}

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
#endif // SSE3

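// Horizontal sum of the four 32-bit lanes: fold the upper 64 bits onto the lower 64 bits, then add
// the two remaining lanes.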
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
  return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  Packet4i tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
  tmp0 = _mm_add_epi32(tmp0, tmp1);
  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  tmp1 = _mm_add_epi32(tmp1, tmp2);
  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
  return _mm_add_epi32(tmp0, tmp2);
}

// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow!)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

#if EIGEN_COMP_GNUC
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
// {
//   Packet4f res = b;
//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
//   return res;
// }
// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i)
// {
//   Packet4i res = a;
//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
//   return res;
// }
#endif

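// palign_impl shifts the concatenation [first, second] by Offset elements: first becomes
// { first[Offset], ..., first[size-1], second[0], ..., second[Offset-1] }.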
#ifdef EIGEN_VECTORIZE_SSSE3
// SSSE3 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset!=0)
      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset!=0)
      first = _mm_alignr_epi8(second,first, Offset*4);
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
  }
};
#else
// SSE2 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset==1)
    {
      first = _mm_move_ss(first,second);
      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
    }
    else if (Offset==2)
    {
      first = _mm_movehl_ps(first,first);
      first = _mm_movelh_ps(first,second);
    }
    else if (Offset==3)
    {
      first = _mm_move_ss(first,second);
      first = _mm_shuffle_ps(first,second,0x93);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_shuffle_epi32(first,0x39);
    }
    else if (Offset==2)
    {
      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
    }
    else if (Offset==3)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
    }
  }
};
#endif

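// In-register transposes of small packet blocks; the 4x4 float case maps to _MM_TRANSPOSE4_PS,
// the others use unpacklo/unpackhi sequences.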
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  _MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[1] = tmp;
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  __m128i T0 = _mm_unpacklo_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T1 = _mm_unpacklo_epi32(kernel.packet[2], kernel.packet[3]);
  __m128i T2 = _mm_unpackhi_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T3 = _mm_unpackhi_epi32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = _mm_unpacklo_epi64(T0, T1);
  kernel.packet[1] = _mm_unpackhi_epi64(T0, T1);
  kernel.packet[2] = _mm_unpacklo_epi64(T2, T3);
  kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);
}

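// pblend selects, lane by lane, thenPacket where the selector is non-zero and elsePacket where it is zero.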
template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128i false_mask = _mm_cmpeq_epi32(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_epi8(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
  const __m128 zero = _mm_setzero_ps();
  const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128 false_mask = _mm_cmpeq_ps(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_ps(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
  const __m128d zero = _mm_setzero_pd();
  const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]);
  __m128d false_mask = _mm_cmpeq_pd(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_pd(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket));
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_SSE_H