#ifndef SSE2NEON_H
#define SSE2NEON_H

// This header file provides a simple API translation layer
// between SSE intrinsics and their corresponding ARM NEON versions
//
// This header file does not (yet) translate *all* of the SSE intrinsics.
// Since this is in support of a specific porting effort, I have only
// included the intrinsics I needed to get my port to work.
//
// Questions/Comments/Feedback send to: jratcliffscarab@gmail.com
//
// If you want to improve or add to this project, send me an
// email and I will probably approve your access to the depot.
//
// Project is located here:
//
//	https://github.com/jratcliff63367/sse2neon
//
// Show your appreciation for open source by sending me a bitcoin tip to the following
// address.
//
// TipJar: 1PzgWDSyq4pmdAXRH8SPUtta4SWGrt4B1p :
// https://blockchain.info/address/1PzgWDSyq4pmdAXRH8SPUtta4SWGrt4B1p
//
//
// Contributors to this project are:
//
// John W. Ratcliff     : jratcliffscarab@gmail.com
// Brandon Rowlett      : browlett@nvidia.com
// Ken Fast             : kfast@gdeb.com
// Eric van Beurden     : evanbeurden@nvidia.com
// Alexander Potylitsin : apotylitsin@nvidia.com
//
//
// *********************************************************************************************************************
// apoty: March 17, 2017
// Most of the current version has been reworked to fix issues and potential issues.
// All unit tests were rewritten as part of the forge lib project to cover all implemented functions.
// *********************************************************************************************************************
// Release notes for January 20, 2017 version:
//
// The unit tests have been refactored.  They no longer assert on an error; instead they return a pass/fail condition.
// The unit tests now test 10,000 random float and int values against each intrinsic.
//
// SSE2NEON now supports 95 SSE intrinsics.  39 of them have formal unit tests which have been implemented and
// fully tested on NEON/ARM.  The remaining 56 still need unit tests implemented.
//
// A struct is now defined in this header file called 'SIMDVec' which can be used by applications which
// attempt to access the contents of an __m128 struct directly.  It is important to note that Microsoft considers
// accessing the __m128 struct directly to be bad coding practice: @see: https://msdn.microsoft.com/en-us/library/ayeb3ayc.aspx
//
// However, some legacy source code may try to access the contents of an __m128 struct directly, so the developer
// can use the SIMDVec as an alias for it.  Any casting must be done manually by the developer, as you cannot
// cast or otherwise alias the base NEON data type for intrinsic operations.
//
// A bug was found with the _mm_shuffle_ps intrinsic.  If the shuffle permutation was not one of the ones with
// a custom/unique implementation, it fell through to the default shuffle implementation, which failed to
// return the correct value.  This is now fixed.
//
// A bug was found with the _mm_cvtps_epi32 intrinsic.  This converts floating point values to integers.
// It was not honoring the correct rounding mode.  In SSE the default rounding mode when converting from float to int
// is to use 'round to even', otherwise known as 'banker's rounding'.  ARMv7 did not support this feature but ARMv8 does.
// As it stands today, this header file assumes ARMv8.  If you are trying to target really old ARM devices, you may get
// a build error.
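//
// For illustration (not part of the original notes): round-to-even differs from C's
// truncating float-to-int conversion mainly at the halfway cases, e.g. 0.5f -> 0,
// 1.5f -> 2 and 2.5f -> 2 under round-to-even, whereas truncation would give 0, 1 and 2.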
//
// Support for a number of new intrinsics was added, however, none of them yet have unit tests to 100% confirm they are
// producing the correct results on NEON.  These unit tests will be added as soon as possible.
//
// Here is the list of new intrinsics which have been added:
//
// _mm_cvtss_f32     : extracts the lower order floating point value from the parameter
// _mm_add_ss        : adds the scalar single-precision floating point values of a and b
// _mm_div_ps        : divides the four single-precision, floating-point values of a and b
// _mm_div_ss        : divides the scalar single-precision floating point value of a by b
// _mm_sqrt_ss       : computes the approximation of the square root of the scalar single-precision floating point value of in
// _mm_rsqrt_ps      : computes the approximations of the reciprocal square roots of the four single-precision floating point values of in
// _mm_comilt_ss     : compares the lower single-precision floating point scalar values of a and b using a less than operation
// _mm_comigt_ss     : compares the lower single-precision floating point scalar values of a and b using a greater than operation
// _mm_comile_ss     : compares the lower single-precision floating point scalar values of a and b using a less than or equal operation
// _mm_comige_ss     : compares the lower single-precision floating point scalar values of a and b using a greater than or equal operation
// _mm_comieq_ss     : compares the lower single-precision floating point scalar values of a and b using an equality operation
// _mm_comineq_ss    : compares the lower single-precision floating point scalar values of a and b using an inequality operation
// _mm_unpackhi_epi8 : interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper 8 signed or unsigned 8-bit integers in b
// _mm_unpackhi_epi16: interleaves the upper 4 signed or unsigned 16-bit integers in a with the upper 4 signed or unsigned 16-bit integers in b
//
// *********************************************************************************************************************
/*
** The MIT license:
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and associated documentation files (the "Software"), to deal
** in the Software without restriction, including without limitation the rights
** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
** copies of the Software, and to permit persons to whom the Software is furnished
** to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in all
** copies or substantial portions of the Software.

** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#define ENABLE_CPP_VERSION 0

#if defined(__GNUC__) || defined(__clang__)
#	pragma push_macro("FORCE_INLINE")
#	pragma push_macro("ALIGN_STRUCT")
#	define FORCE_INLINE       static inline __attribute__((always_inline))
#	define ALIGN_STRUCT(x)    __attribute__((aligned(x)))
#else
#	error "Macro name collisions may happen with an unknown compiler"
#	define FORCE_INLINE       static inline
#	define ALIGN_STRUCT(x)    __declspec(align(x))
#endif

#include <stdint.h>
#include "arm_neon.h"


/*******************************************************/
/* MACRO for shuffle parameter for _mm_shuffle_ps().   */
/* Argument fp3 is a digit[0123] that represents the fp*/
/* from argument "b" of _mm_shuffle_ps that will be    */
/* placed in fp3 of result. fp2 is the same for fp2 in */
/* result. fp1 is a digit[0123] that represents the fp */
/* from argument "a" of _mm_shuffle_ps that will be    */
/* placed in fp1 of result. fp0 is the same for fp0 of */
/* result                                              */
/*******************************************************/
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
	(((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
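
// For illustration (not part of the original comment): _MM_SHUFFLE packs four 2-bit lane
// selectors into one byte, e.g. _MM_SHUFFLE(3, 2, 1, 0) == 0b11100100 == 0xE4, which is
// the identity permutation when used with _mm_shuffle_epi32.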

/* indicate immediate constant argument in a given range */
#define __constrange(a,b) \
	const

typedef float32x4_t __m128;
typedef int32x4_t __m128i;

typedef float32x2_t __m64;

// ******************************************
// type-safe casting between types
// ******************************************

#define vreinterpretq_m128_f16(x) \
	vreinterpretq_f32_f16(x)

#define vreinterpretq_m128_f32(x) \
	(x)

#define vreinterpretq_m128_f64(x) \
	vreinterpretq_f32_f64(x)


#define vreinterpretq_m128_u8(x) \
	vreinterpretq_f32_u8(x)

#define vreinterpretq_m128_u16(x) \
	vreinterpretq_f32_u16(x)

#define vreinterpretq_m128_u32(x) \
	vreinterpretq_f32_u32(x)

#define vreinterpretq_m128_u64(x) \
	vreinterpretq_f32_u64(x)


#define vreinterpretq_m128_s8(x) \
	vreinterpretq_f32_s8(x)

#define vreinterpretq_m128_s16(x) \
	vreinterpretq_f32_s16(x)

#define vreinterpretq_m128_s32(x) \
	vreinterpretq_f32_s32(x)

#define vreinterpretq_m128_s64(x) \
	vreinterpretq_f32_s64(x)


#define vreinterpretq_f16_m128(x) \
	vreinterpretq_f16_f32(x)

#define vreinterpretq_f32_m128(x) \
	(x)

#define vreinterpretq_f64_m128(x) \
	vreinterpretq_f64_f32(x)


#define vreinterpretq_u8_m128(x) \
	vreinterpretq_u8_f32(x)

#define vreinterpretq_u16_m128(x) \
	vreinterpretq_u16_f32(x)

#define vreinterpretq_u32_m128(x) \
	vreinterpretq_u32_f32(x)

#define vreinterpretq_u64_m128(x) \
	vreinterpretq_u64_f32(x)


#define vreinterpretq_s8_m128(x) \
	vreinterpretq_s8_f32(x)

#define vreinterpretq_s16_m128(x) \
	vreinterpretq_s16_f32(x)

#define vreinterpretq_s32_m128(x) \
	vreinterpretq_s32_f32(x)

#define vreinterpretq_s64_m128(x) \
	vreinterpretq_s64_f32(x)


#define vreinterpretq_m128i_s8(x) \
	vreinterpretq_s32_s8(x)

#define vreinterpretq_m128i_s16(x) \
	vreinterpretq_s32_s16(x)

#define vreinterpretq_m128i_s32(x) \
	(x)

#define vreinterpretq_m128i_s64(x) \
	vreinterpretq_s32_s64(x)


#define vreinterpretq_m128i_u8(x) \
	vreinterpretq_s32_u8(x)

#define vreinterpretq_m128i_u16(x) \
	vreinterpretq_s32_u16(x)

#define vreinterpretq_m128i_u32(x) \
	vreinterpretq_s32_u32(x)

#define vreinterpretq_m128i_u64(x) \
	vreinterpretq_s32_u64(x)


#define vreinterpretq_s8_m128i(x) \
	vreinterpretq_s8_s32(x)

#define vreinterpretq_s16_m128i(x) \
	vreinterpretq_s16_s32(x)

#define vreinterpretq_s32_m128i(x) \
	(x)

#define vreinterpretq_s64_m128i(x) \
	vreinterpretq_s64_s32(x)


#define vreinterpretq_u8_m128i(x) \
	vreinterpretq_u8_s32(x)

#define vreinterpretq_u16_m128i(x) \
	vreinterpretq_u16_s32(x)

#define vreinterpretq_u32_m128i(x) \
	vreinterpretq_u32_s32(x)

#define vreinterpretq_u64_m128i(x) \
	vreinterpretq_u64_s32(x)

// union intended to allow direct access to an __m128 variable using the names that the MSVC
// compiler provides.  This union should really only be used when trying to access the members
// of the vector as integer values.  GCC/clang allow native access to the float members through
// a simple array access operator (in C since 4.6, in C++ since 4.8).
//
// Ideally, direct accesses to SIMD vectors should not be used, since they can cause a performance
// hit.  If it really is needed however, the original __m128 variable can be aliased with a
// pointer to this union and used to access individual components.  The use of this union should
// be hidden behind a macro that is used throughout the codebase to access the members instead
// of always declaring this type of variable.
typedef union ALIGN_STRUCT(16) SIMDVec
{
	float       m128_f32[4];    // as floats - do not use this.  Added for convenience.
	int8_t      m128_i8[16];    // as signed 8-bit integers.
	int16_t     m128_i16[8];    // as signed 16-bit integers.
	int32_t     m128_i32[4];    // as signed 32-bit integers.
	int64_t     m128_i64[2];    // as signed 64-bit integers.
	uint8_t     m128_u8[16];    // as unsigned 8-bit integers.
	uint16_t    m128_u16[8];    // as unsigned 16-bit integers.
	uint32_t    m128_u32[4];    // as unsigned 32-bit integers.
	uint64_t    m128_u64[2];    // as unsigned 64-bit integers.
} SIMDVec;
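
// For illustration only (a sketch of the usage described above, not part of the API;
// the variable names are arbitrary): reading the raw lanes of an __m128 through SIMDVec.
//
//   __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
//   const SIMDVec *sv = (const SIMDVec *)&v;     // manual aliasing done by the developer
//   uint32_t lane0_bits = sv->m128_u32[0];       // bit pattern of lane 0 (1.0f)
//   int32_t  lane3_bits = sv->m128_i32[3];       // bit pattern of lane 3 (4.0f)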


// ******************************************
// Set/get methods
// ******************************************

// Extracts the lower order floating point value from the parameter : https://msdn.microsoft.com/en-us/library/bb514059%28v=vs.120%29.aspx?f=255&MSPPError=-2147217396
FORCE_INLINE float _mm_cvtss_f32(__m128 a)
{
	return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
}

// Sets the 128-bit value to zero. https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx
FORCE_INLINE __m128i _mm_setzero_si128()
{
	return vreinterpretq_m128i_s32(vdupq_n_s32(0));
}

// Clears the four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setzero_ps(void)
{
	return vreinterpretq_m128_f32(vdupq_n_f32(0));
}

// Sets the four single-precision, floating-point values to w. https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set1_ps(float _w)
{
	return vreinterpretq_m128_f32(vdupq_n_f32(_w));
}

// Sets the four single-precision, floating-point values to w. https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps1(float _w)
{
	return vreinterpretq_m128_f32(vdupq_n_f32(_w));
}

// Sets the four single-precision, floating-point values to the four inputs. https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
{
	float __attribute__((aligned(16))) data[4] = { x, y, z, w };
	return vreinterpretq_m128_f32(vld1q_f32(data));
}
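
// Note on argument order (illustrative comment only): _mm_set_ps(w, z, y, x) places x
// in lane 0, matching the SSE convention, e.g.
//
//   __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
//   // _mm_cvtss_f32(v) == 1.0f and vgetq_lane_f32(v, 3) == 4.0f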

// Sets the four single-precision, floating-point values to the four inputs in reverse order. https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
{
	float __attribute__((aligned(16))) data[4] = { w, z, y, x };
	return vreinterpretq_m128_f32(vld1q_f32(data));
}


// added by hasindu
// Sets the 4 signed 32-bit integer values in reverse order. https://technet.microsoft.com/en-us/library/security/27yb3ee5(v=vs.90).aspx
FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0)
{
	int32_t __attribute__((aligned(16))) data[4] = { i3, i2, i1, i0 };
	return vreinterpretq_m128i_s32(vld1q_s32(data));
}

// following added by hasindu
// Sets the 16 signed 8-bit integer values to w. https://msdn.microsoft.com/en-us/library/6e14xhyf(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set1_epi8(char w)
{
	return vreinterpretq_m128i_s8(vdupq_n_s8(w));
}


// following added by hasindu
// Sets the 8 signed 16-bit integer values to w. https://msdn.microsoft.com/en-us/library/k0ya3x0e(v=vs.90).aspx
FORCE_INLINE __m128i _mm_set1_epi16(short w)
{
	return vreinterpretq_m128i_s16(vdupq_n_s16(w));
}

// following added by hasindu
// Sets the 8 signed 16-bit integer values. https://msdn.microsoft.com/en-au/library/3e0fek84(v=vs.90).aspx
FORCE_INLINE __m128i _mm_set_epi16(short i7, short i6, short i5, short i4, short i3, short i2, short i1, short i0)
{
	int16_t __attribute__((aligned(16))) data[8] = { i0, i1, i2, i3, i4, i5, i6, i7 };
	return vreinterpretq_m128i_s16(vld1q_s16(data));
}


// Sets the 4 signed 32-bit integer values to i. https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set1_epi32(int _i)
{
	return vreinterpretq_m128i_s32(vdupq_n_s32(_i));
}

// Sets the 4 signed 32-bit integer values. https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
{
	int32_t __attribute__((aligned(16))) data[4] = { i0, i1, i2, i3 };
	return vreinterpretq_m128i_s32(vld1q_s32(data));
}

// Stores four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx
FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
{
	vst1q_f32(p, vreinterpretq_f32_m128(a));
}

// Stores four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx
FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
{
	vst1q_f32(p, vreinterpretq_f32_m128(a));
}

// Stores four 32-bit integer values (as a __m128i value) at the address p. https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
{
	vst1q_s32((int32_t*) p, vreinterpretq_s32_m128i(a));
}

// added by hasindu (verify whether this needs an alignment requirement)
// Stores four 32-bit integer values (as a __m128i value) at the address p. https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
{
	vst1q_s32((int32_t*) p, vreinterpretq_s32_m128i(a));
}

// Stores the lower single-precision, floating-point value. https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx
FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
{
	vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0);
}

// Reads the lower 64 bits of b and stores them into the lower 64 bits of a. https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx
FORCE_INLINE void _mm_storel_epi64(__m128i* a, __m128i b)
{
	uint64x1_t hi = vget_high_u64(vreinterpretq_u64_m128i(*a));
	uint64x1_t lo = vget_low_u64(vreinterpretq_u64_m128i(b));
	*a = vreinterpretq_m128i_u64(vcombine_u64(lo, hi));
}

// Stores the lower two single-precision floating point values of a to the address p. https://msdn.microsoft.com/en-us/library/h54t98ks(v=vs.90).aspx
FORCE_INLINE void _mm_storel_pi(__m64* p, __m128 a)
{
	*p = vget_low_f32(vreinterpretq_f32_m128(a));
}

// Loads a single single-precision, floating-point value, copying it into all four words. https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx
FORCE_INLINE __m128 _mm_load1_ps(const float * p)
{
	return vreinterpretq_m128_f32(vld1q_dup_f32(p));
}
#define _mm_load_ps1 _mm_load1_ps

// Sets the lower two single-precision, floating-point values with 64
// bits of data loaded from the address p; the upper two values are passed
// through from a. https://msdn.microsoft.com/en-us/library/s57cyak2(v=vs.100).aspx
FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *b)
{
	return vreinterpretq_m128_f32(vcombine_f32(vld1_f32((const float32_t*)b), vget_high_f32(vreinterpretq_f32_m128(a))));
}

// Loads four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx
FORCE_INLINE __m128 _mm_load_ps(const float * p)
{
	return vreinterpretq_m128_f32(vld1q_f32(p));
}

// Loads four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_loadu_ps(const float * p)
{
	// for NEON, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are equivalent
	return vreinterpretq_m128_f32(vld1q_f32(p));
}

// Loads a single-precision, floating-point value into the low word and clears the upper three words. https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_load_ss(const float * p)
{
	return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0));
}


// ******************************************
// Logic/Binary operations
// ******************************************

// Compares for inequality. https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_u32( vmvnq_u32( vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)) ) );
}

// Computes the bitwise AND-NOT of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx
FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_s32( vbicq_s32(vreinterpretq_s32_m128(b), vreinterpretq_s32_m128(a)) ); // *NOTE* argument swap
}

// Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the 128-bit value in a. https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx
FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s32( vbicq_s32(vreinterpretq_s32_m128i(b), vreinterpretq_s32_m128i(a)) ); // *NOTE* argument swap
}

// Computes the bitwise AND of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx
FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s32( vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)) );
}

// Computes the bitwise AND of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx
FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_s32( vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)) );
}

// Computes the bitwise OR of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx
FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_s32( vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)) );
}

// Computes the bitwise XOR (exclusive OR) of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx
FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_s32( veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)) );
}

// Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx
FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s32( vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)) );
}

// Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx
FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s32( veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)) );
}

// NEON does not provide this method directly.
// Creates a 4-bit mask from the most significant bits of the four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx
FORCE_INLINE int _mm_movemask_ps(__m128 a)
{
#if ENABLE_CPP_VERSION // I am not yet convinced that the NEON version is faster than the C version of this
	uint32x4_t &ia = *(uint32x4_t *)&a;
	return (ia[0] >> 31) | ((ia[1] >> 30) & 2) | ((ia[2] >> 29) & 4) | ((ia[3] >> 28) & 8);
#else
	static const uint32x4_t movemask = { 1, 2, 4, 8 };
	static const uint32x4_t highbit = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
	uint32x4_t t0 = vreinterpretq_u32_m128(a);
	uint32x4_t t1 = vtstq_u32(t0, highbit);
	uint32x4_t t2 = vandq_u32(t1, movemask);
	uint32x2_t t3 = vorr_u32(vget_low_u32(t2), vget_high_u32(t2));
	return vget_lane_u32(t3, 0) | vget_lane_u32(t3, 1);
#endif
}
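
// Example (illustrative only): lanes 1 and 3 below are negative, so their sign bits are
// set and _mm_movemask_ps returns 0b1010 == 0xA.
//
//   int mask = _mm_movemask_ps(_mm_set_ps(-4.0f, 3.0f, -2.0f, 1.0f));   // mask == 0xA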

// Takes the upper 64 bits of a and places it in the low end of the result
// Takes the lower 64 bits of b and places it into the high end of the result.
FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
{
	float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
	float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
	return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
}

// takes the lower two 32-bit values from a and swaps them and places in high end of result
// takes the higher two 32 bit values from b and swaps them and places in low end of result.
FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
{
	float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
	float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
	return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
}

FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
{
	float32x2_t a21 = vget_high_f32(vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
	float32x2_t b03 = vget_low_f32(vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
	return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
{
	float32x2_t a03 = vget_low_f32(vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
	float32x2_t b21 = vget_high_f32(vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
	return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
}

FORCE_INLINE __m128 _mm_movelh_ps(__m128 a, __m128 b)
{
	float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
	float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
	return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
}

// Moves the upper two values of b into the lower two values of the result; the upper
// two values of the result come from the upper two values of a (MOVHLPS semantics).
FORCE_INLINE __m128 _mm_movehl_ps(__m128 a, __m128 b)
{
	float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
	float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
	return vreinterpretq_m128_f32(vcombine_f32(b32, a32));
}

FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
{
	float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
	float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
	return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
}

FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
{
	float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
	float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
	return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
}

// keeps the low 64 bits of a in the low half and puts the high 64 bits of b in the high half
FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
{
	float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
	float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
	return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
}

FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
{
	float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
	float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
	return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
}

FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
{
	float32x2_t a22 = vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
	float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
	return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
{
	float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
	float32x2_t b22 = vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
	return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
}

FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
{
	float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
	float32x2_t a22 = vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
	float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* apoty: TODO: use vzip ?*/
	float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
	return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
}

FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
{
	float32x2_t a33 = vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
	float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
	return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
{
	float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
	float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
	float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
	float32x2_t b20 = vset_lane_f32(b2, b00, 1);
	return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
{
	float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
	float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
	float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
	float32x2_t b20 = vset_lane_f32(b2, b00, 1);
	return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
{
	float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
	float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
	float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
	float32x2_t b20 = vset_lane_f32(b2, b00, 1);
	return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
}

// NEON does not support a general purpose permute intrinsic.
// Currently I am not sure whether the C implementation is faster or slower than the NEON version.
// Note, this has to be expanded as a macro because the shuffle value must be an immediate value.
// The same is true on SSE as well.
// Selects four specific single-precision, floating-point values from a and b, based on the mask i. https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx
#if ENABLE_CPP_VERSION // I am not convinced that the NEON version is faster than the C version yet.
FORCE_INLINE __m128 _mm_shuffle_ps_default(__m128 a, __m128 b, __constrange(0,255) int imm)
{
	__m128 ret;
	ret[0] = a[imm & 0x3];
	ret[1] = a[(imm >> 2) & 0x3];
	ret[2] = b[(imm >> 4) & 0x03];
	ret[3] = b[(imm >> 6) & 0x03];
	return ret;
}
#else
#define _mm_shuffle_ps_default(a, b, imm) \
({ \
	float32x4_t ret_; \
	ret_ = vmovq_n_f32(vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & 0x3)); \
	ret_ = vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), ret_, 1); \
	ret_ = vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), ret_, 2); \
	ret_ = vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), ret_, 3); \
	vreinterpretq_m128_f32(ret_); \
})
#endif

//FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255) int imm)
#define _mm_shuffle_ps(a, b, imm) \
({ \
	__m128 ret; \
	switch (imm) \
	{ \
		case _MM_SHUFFLE(1, 0, 3, 2): ret = _mm_shuffle_ps_1032((a), (b)); break; \
		case _MM_SHUFFLE(2, 3, 0, 1): ret = _mm_shuffle_ps_2301((a), (b)); break; \
		case _MM_SHUFFLE(0, 3, 2, 1): ret = _mm_shuffle_ps_0321((a), (b)); break; \
		case _MM_SHUFFLE(2, 1, 0, 3): ret = _mm_shuffle_ps_2103((a), (b)); break; \
		case _MM_SHUFFLE(1, 0, 1, 0): ret = _mm_movelh_ps      ((a), (b)); break; \
		case _MM_SHUFFLE(1, 0, 0, 1): ret = _mm_shuffle_ps_1001((a), (b)); break; \
		case _MM_SHUFFLE(0, 1, 0, 1): ret = _mm_shuffle_ps_0101((a), (b)); break; \
		case _MM_SHUFFLE(3, 2, 1, 0): ret = _mm_shuffle_ps_3210((a), (b)); break; \
		case _MM_SHUFFLE(0, 0, 1, 1): ret = _mm_shuffle_ps_0011((a), (b)); break; \
		case _MM_SHUFFLE(0, 0, 2, 2): ret = _mm_shuffle_ps_0022((a), (b)); break; \
		case _MM_SHUFFLE(2, 2, 0, 0): ret = _mm_shuffle_ps_2200((a), (b)); break; \
		case _MM_SHUFFLE(3, 2, 0, 2): ret = _mm_shuffle_ps_3202((a), (b)); break; \
		case _MM_SHUFFLE(3, 2, 3, 2): ret = _mm_movehl_ps      ((b), (a)); break; \
		case _MM_SHUFFLE(1, 1, 3, 3): ret = _mm_shuffle_ps_1133((a), (b)); break; \
		case _MM_SHUFFLE(2, 0, 1, 0): ret = _mm_shuffle_ps_2010((a), (b)); break; \
		case _MM_SHUFFLE(2, 0, 0, 1): ret = _mm_shuffle_ps_2001((a), (b)); break; \
		case _MM_SHUFFLE(2, 0, 3, 2): ret = _mm_shuffle_ps_2032((a), (b)); break; \
		default: ret = _mm_shuffle_ps_default((a), (b), (imm)); break; \
	} \
	ret; \
})
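
// Example (illustrative only; the values are arbitrary and chosen to show the
// selection order, with lane 0 listed first):
//
//   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);               // a = {1, 2, 3, 4}
//   __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);               // b = {5, 6, 7, 8}
//   __m128 r = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));    // r = {1, 2, 7, 8}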

// Takes the upper 64 bits of a and places it in the low end of the result
// Takes the lower 64 bits of a and places it into the high end of the result.
FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
{
	int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
	int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
	return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
}

// takes the lower two 32-bit values from a and swaps them and places in low end of result
// takes the higher two 32 bit values from a and swaps them and places in high end of result.
FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
{
	int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
	int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
	return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
}

// rotates the least significant 32 bits into the most significant 32 bits, and shifts the rest down
FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
{
	return vreinterpretq_m128i_s32(vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1));
}

// rotates the most significant 32 bits into the least significant 32 bits, and shifts the rest up
FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a)
{
	return vreinterpretq_m128i_s32(vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3));
}

// gets the lower 64 bits of a, and places it in the upper 64 bits
// gets the lower 64 bits of a and places it in the lower 64 bits
FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a)
{
	int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
	return vreinterpretq_m128i_s32(vcombine_s32(a10, a10));
}

// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places it in the lower 64 bits
// gets the lower 64 bits of a, and places it in the upper 64 bits
FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a)
{
	int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
	int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
	return vreinterpretq_m128i_s32(vcombine_s32(a01, a10));
}

// gets the lower 64 bits of a, swaps the 0 and 1 elements and places it in the upper 64 bits
// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places it in the lower 64 bits
FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a)
{
	int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
	return vreinterpretq_m128i_s32(vcombine_s32(a01, a01));
}

FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a)
{
	int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1);
	int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
	return vreinterpretq_m128i_s32(vcombine_s32(a11, a22));
}

FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a)
{
	int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
	int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
	return vreinterpretq_m128i_s32(vcombine_s32(a22, a01));
}

FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a)
{
	int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
	int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1);
	return vreinterpretq_m128i_s32(vcombine_s32(a32, a33));
}

//FORCE_INLINE __m128i _mm_shuffle_epi32_default(__m128i a, __constrange(0,255) int imm)
#if ENABLE_CPP_VERSION
FORCE_INLINE __m128i _mm_shuffle_epi32_default(__m128i a, __constrange(0,255) int imm)
{
	__m128i ret;
	ret[0] = a[imm & 0x3];
	ret[1] = a[(imm >> 2) & 0x3];
	ret[2] = a[(imm >> 4) & 0x03];
	ret[3] = a[(imm >> 6) & 0x03];
	return ret;
}
#else
#define _mm_shuffle_epi32_default(a, imm) \
({ \
	int32x4_t ret; \
	ret = vmovq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm) & 0x3)); \
	ret = vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 2) & 0x3), ret, 1); \
	ret = vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), ret, 2); \
	ret = vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), ret, 3); \
	vreinterpretq_m128i_s32(ret); \
})
#endif

//FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a, __constrange(0,255) int imm)
#if defined(__aarch64__)
#define _mm_shuffle_epi32_splat(a, imm) \
({ \
	vreinterpretq_m128i_s32(vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))); \
})
#else
#define _mm_shuffle_epi32_splat(a, imm) \
({ \
	vreinterpretq_m128i_s32(vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))); \
})
#endif

// Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm. https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx
//FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a, __constrange(0,255) int imm)
#define _mm_shuffle_epi32(a, imm) \
({ \
	__m128i ret; \
	switch (imm) \
	{ \
		case _MM_SHUFFLE(1, 0, 3, 2): ret = _mm_shuffle_epi_1032((a)); break; \
		case _MM_SHUFFLE(2, 3, 0, 1): ret = _mm_shuffle_epi_2301((a)); break; \
		case _MM_SHUFFLE(0, 3, 2, 1): ret = _mm_shuffle_epi_0321((a)); break; \
		case _MM_SHUFFLE(2, 1, 0, 3): ret = _mm_shuffle_epi_2103((a)); break; \
		case _MM_SHUFFLE(1, 0, 1, 0): ret = _mm_shuffle_epi_1010((a)); break; \
		case _MM_SHUFFLE(1, 0, 0, 1): ret = _mm_shuffle_epi_1001((a)); break; \
		case _MM_SHUFFLE(0, 1, 0, 1): ret = _mm_shuffle_epi_0101((a)); break; \
		case _MM_SHUFFLE(2, 2, 1, 1): ret = _mm_shuffle_epi_2211((a)); break; \
		case _MM_SHUFFLE(0, 1, 2, 2): ret = _mm_shuffle_epi_0122((a)); break; \
		case _MM_SHUFFLE(3, 3, 3, 2): ret = _mm_shuffle_epi_3332((a)); break; \
		case _MM_SHUFFLE(0, 0, 0, 0): ret = _mm_shuffle_epi32_splat((a), 0); break; \
		case _MM_SHUFFLE(1, 1, 1, 1): ret = _mm_shuffle_epi32_splat((a), 1); break; \
		case _MM_SHUFFLE(2, 2, 2, 2): ret = _mm_shuffle_epi32_splat((a), 2); break; \
		case _MM_SHUFFLE(3, 3, 3, 3): ret = _mm_shuffle_epi32_splat((a), 3); break; \
		default: ret = _mm_shuffle_epi32_default((a), (imm)); break; \
	} \
	ret; \
})
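
// Example (illustrative only): broadcasting lane 0 of an integer vector.
//
//   __m128i v = _mm_set_epi32(4, 3, 2, 1);                       // lanes {1, 2, 3, 4}
//   __m128i s = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0));   // lanes {1, 1, 1, 1}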

// Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified by imm. https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx
//FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a, __constrange(0,255) int imm)
#define _mm_shufflehi_epi16_function(a, imm) \
({ \
	int16x8_t ret = vreinterpretq_s16_m128i(a); \
	int16x4_t highBits = vget_high_s16(ret); \
	ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & 0x3), ret, 4); \
	ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, 5); \
	ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, 6); \
	ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, 7); \
	vreinterpretq_m128i_s16(ret); \
})

//FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a, __constrange(0,255) int imm)
#define _mm_shufflehi_epi16(a, imm) \
	_mm_shufflehi_epi16_function((a), (imm))


// added by hasindu
// Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while shifting in zeros. https://msdn.microsoft.com/en-us/library/es73bcsy(v=vs.90).aspx
#define _mm_slli_epi16(a, imm) \
({ \
	__m128i ret; \
	if ((imm) <= 0) { \
		ret = a; \
	} \
	else if ((imm) > 15) { \
		ret = _mm_setzero_si128(); \
	} \
	else { \
		ret = vreinterpretq_m128i_s16(vshlq_n_s16(vreinterpretq_s16_m128i(a), (imm))); \
	} \
	ret; \
})



// Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while shifting in zeros. https://msdn.microsoft.com/en-us/library/z2k3bbtb%28v=vs.90%29.aspx
//FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, __constrange(0,255) int imm)
#define _mm_slli_epi32(a, imm) \
({ \
	__m128i ret; \
	if ((imm) <= 0) { \
		ret = a; \
	} \
	else if ((imm) > 31) { \
		ret = _mm_setzero_si128(); \
	} \
	else { \
		ret = vreinterpretq_m128i_s32(vshlq_n_s32(vreinterpretq_s32_m128i(a), (imm))); \
	} \
	ret; \
})


// added by hasindu
// Shifts the 8 signed or unsigned 16-bit integers in a right by count bits while shifting in zeros.
// https://msdn.microsoft.com/en-us/library/6tcwd38t(v=vs.90).aspx
#define _mm_srli_epi16(a, imm) \
({ \
	__m128i ret; \
	if ((imm) <= 0) { \
		ret = a; \
	} \
	else if ((imm) > 15) { \
		ret = _mm_setzero_si128(); \
	} \
	else { \
		ret = vreinterpretq_m128i_u16(vshrq_n_u16(vreinterpretq_u16_m128i(a), (imm))); \
	} \
	ret; \
})


// Shifts the 4 signed or unsigned 32-bit integers in a right by count bits while shifting in zeros. https://msdn.microsoft.com/en-us/library/w486zcfa(v=vs.100).aspx
//FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm)
#define _mm_srli_epi32(a, imm) \
({ \
	__m128i ret; \
	if ((imm) <= 0) { \
		ret = a; \
	} \
	else if ((imm) > 31) { \
		ret = _mm_setzero_si128(); \
	} \
	else { \
		ret = vreinterpretq_m128i_u32(vshrq_n_u32(vreinterpretq_u32_m128i(a), (imm))); \
	} \
	ret; \
})

// Shifts the 4 signed 32-bit integers in a right by count bits while shifting in the sign bit. https://msdn.microsoft.com/en-us/library/z1939387(v=vs.100).aspx
//FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm)
#define _mm_srai_epi32(a, imm) \
({ \
	__m128i ret; \
	if ((imm) <= 0) { \
		ret = a; \
	} \
	else if ((imm) > 31) { \
		ret = vreinterpretq_m128i_s32(vshrq_n_s32(vreinterpretq_s32_m128i(a), 16)); \
		ret = vreinterpretq_m128i_s32(vshrq_n_s32(vreinterpretq_s32_m128i(ret), 16)); \
	} \
	else { \
		ret = vreinterpretq_m128i_s32(vshrq_n_s32(vreinterpretq_s32_m128i(a), (imm))); \
	} \
	ret; \
})

// Shifts the 128-bit value in a right by imm bytes while shifting in zeros. imm must be an immediate. https://msdn.microsoft.com/en-us/library/305w28yz(v=vs.100).aspx
//FORCE_INLINE _mm_srli_si128(__m128i a, __constrange(0,255) int imm)
#define _mm_srli_si128(a, imm) \
({ \
	__m128i ret; \
	if ((imm) <= 0) { \
		ret = a; \
	} \
	else if ((imm) > 15) { \
		ret = _mm_setzero_si128(); \
	} \
	else { \
		ret = vreinterpretq_m128i_s8(vextq_s8(vreinterpretq_s8_m128i(a), vdupq_n_s8(0), (imm))); \
	} \
	ret; \
})
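
// Example (illustrative only): viewed as 32-bit lanes, _mm_srli_si128(a, 4) moves each
// lane down by one and zeroes the top lane.
//
//   __m128i a = _mm_set_epi32(4, 3, 2, 1);   // lanes {1, 2, 3, 4}
//   __m128i r = _mm_srli_si128(a, 4);        // lanes {2, 3, 4, 0}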

// Shifts the 128-bit value in a left by imm bytes while shifting in zeros. imm must be an immediate. https://msdn.microsoft.com/en-us/library/34d3k2kt(v=vs.100).aspx
//FORCE_INLINE __m128i _mm_slli_si128(__m128i a, __constrange(0,255) int imm)
#define _mm_slli_si128(a, imm) \
({ \
	__m128i ret; \
	if ((imm) <= 0) { \
		ret = a; \
	} \
	else if ((imm) > 15) { \
		ret = _mm_setzero_si128(); \
	} \
	else { \
		ret = vreinterpretq_m128i_s8(vextq_s8(vdupq_n_s8(0), vreinterpretq_s8_m128i(a), 16 - (imm))); \
	} \
	ret; \
})

// NEON does not provide a version of this function; here is an article about some ways to reproduce the result.
// http://stackoverflow.com/questions/11870910/sse-mm-movemask-epi8-equivalent-method-for-arm-neon
// Creates a 16-bit mask from the most significant bits of the 16 signed or unsigned 8-bit integers in a and zero extends the upper bits. https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx
FORCE_INLINE int _mm_movemask_epi8(__m128i _a)
{
	uint8x16_t input = vreinterpretq_u8_m128i(_a);
	static const int8_t __attribute__((aligned(16))) xr[8] = { -7, -6, -5, -4, -3, -2, -1, 0 };
	uint8x8_t mask_and = vdup_n_u8(0x80);
	int8x8_t mask_shift = vld1_s8(xr);

	uint8x8_t lo = vget_low_u8(input);
	uint8x8_t hi = vget_high_u8(input);

	lo = vand_u8(lo, mask_and);
	lo = vshl_u8(lo, mask_shift);

	hi = vand_u8(hi, mask_and);
	hi = vshl_u8(hi, mask_shift);

	lo = vpadd_u8(lo, lo);
	lo = vpadd_u8(lo, lo);
	lo = vpadd_u8(lo, lo);

	hi = vpadd_u8(hi, hi);
	hi = vpadd_u8(hi, hi);
	hi = vpadd_u8(hi, hi);

	return ((hi[0] << 8) | (lo[0] & 0xFF));
}
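
// Example (illustrative only): if only bytes 0 and 15 of the argument have their most
// significant bit set, _mm_movemask_epi8 returns (1 << 0) | (1 << 15) == 0x8001.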


// ******************************************
// Math operations
// ******************************************

// Subtracts the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/1zad2k61(v=vs.100).aspx
FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_f32(vsubq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}

// Subtracts the 4 signed or unsigned 32-bit integers of b from the 4 signed or unsigned 32-bit integers of a. https://msdn.microsoft.com/en-us/library/vstudio/fhh866h0(v=vs.100).aspx
FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s32(vsubq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Subtracts the 8 signed or unsigned 16-bit integers of b from the 8 signed or unsigned 16-bit integers of a.
FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s16(vsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
}

// added by hasindu
// Subtracts the 16 signed or unsigned 8-bit integers of b from the 16 signed or unsigned 8-bit integers of a.
FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s8(vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
}

// added by hasindu
// Subtracts the 8 unsigned 16-bit integers of b from the 8 unsigned 16-bit integers of a and saturates. https://technet.microsoft.com/en-us/subscriptions/index/f44y0s19(v=vs.90).aspx
FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u16(vqsubq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
}

// added by hasindu
// Subtracts the 16 unsigned 8-bit integers of b from the 16 unsigned 8-bit integers of a and saturates. https://technet.microsoft.com/en-us/subscriptions/yadkxc18(v=vs.90)
FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u8(vqsubq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
}

// Adds the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/c9848chc(v=vs.100).aspx
FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_f32(vaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}

// Adds the scalar single-precision floating point values of a and b. https://msdn.microsoft.com/en-us/library/be94x2y6(v=vs.100).aspx
FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b)
{
	float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
	float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
	// the upper values in the result must be the remnants of <a>.
	return vreinterpretq_m128_f32(vaddq_f32(vreinterpretq_f32_m128(a), value));
}

// Adds the 4 signed or unsigned 32-bit integers in a to the 4 signed or unsigned 32-bit integers in b. https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s32(vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Adds the 8 signed or unsigned 16-bit integers in a to the 8 signed or unsigned 16-bit integers in b. https://msdn.microsoft.com/en-us/library/fceha5k4(v=vs.100).aspx
FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s16(vaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
}

// added by hasindu
// Adds the 16 signed or unsigned 8-bit integers in a to the 16 signed or unsigned 8-bit integers in b. https://technet.microsoft.com/en-us/subscriptions/yc7tcyzs(v=vs.90)
FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s8(vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
}

// added by hasindu
// Adds the 8 signed 16-bit integers in a to the 8 signed 16-bit integers in b and saturates. https://msdn.microsoft.com/en-us/library/1a306ef8(v=vs.100).aspx
FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s16(vqaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
}

// added by hasindu
// Adds the 16 unsigned 8-bit integers in a to the 16 unsigned 8-bit integers in b and saturates. https://msdn.microsoft.com/en-us/library/9hahyddy(v=vs.100).aspx
FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u8(vqaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
}


// Multiplies the 8 signed or unsigned 16-bit integers from a by the 8 signed or unsigned 16-bit integers from b. https://msdn.microsoft.com/en-us/library/vstudio/9ks1472s(v=vs.100).aspx
FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s16(vmulq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
}

// Multiplies the 4 signed or unsigned 32-bit integers from a by the 4 signed or unsigned 32-bit integers from b. https://msdn.microsoft.com/en-us/library/vstudio/bb531409(v=vs.100).aspx
FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s32(vmulq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Multiplies the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/22kbk6t9(v=vs.100).aspx
FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}

// Divides the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/edaw8147(v=vs.100).aspx
FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b)
{
	float32x4_t recip0 = vrecpeq_f32(vreinterpretq_f32_m128(b));
	float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, vreinterpretq_f32_m128(b)));
	return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), recip1));
}

// Divides the scalar single-precision floating point value of a by b. https://msdn.microsoft.com/en-us/library/4y73xa49(v=vs.100).aspx
FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b)
{
	float32_t value = vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0);
	return vreinterpretq_m128_f32(vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
}

// This version does additional iterations to improve accuracy.  Between 1 and 4 iterations are recommended.
// Computes the approximations of reciprocals of the four single-precision, floating-point values of a. https://msdn.microsoft.com/en-us/library/vstudio/796k1tty(v=vs.100).aspx
FORCE_INLINE __m128 recipq_newton(__m128 in, int n)
{
	int i;
	float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
	for (i = 0; i < n; ++i)
	{
		recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
	}
	return vreinterpretq_m128_f32(recip);
}
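
// Example (illustrative only): two Newton-Raphson steps usually bring the estimate
// close to full single precision.
//
//   __m128 x = _mm_set1_ps(3.0f);
//   __m128 r = recipq_newton(x, 2);   // each lane is approximately 0.33333f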

// Computes the approximations of reciprocals of the four single-precision, floating-point values of a. https://msdn.microsoft.com/en-us/library/vstudio/796k1tty(v=vs.100).aspx
FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
{
	float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
	recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
	return vreinterpretq_m128_f32(recip);
}

// Computes the approximations of square roots of the four single-precision, floating-point values of a. First computes reciprocal square roots and then reciprocals of the four values. https://msdn.microsoft.com/en-us/library/vstudio/8z67bwwk(v=vs.100).aspx
FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
{
	float32x4_t recipsq = vrsqrteq_f32(vreinterpretq_f32_m128(in));
	float32x4_t sq = vrecpeq_f32(recipsq);
	// TODO: use the Newton-Raphson step instructions (vrsqrtsq_f32/vrecpsq_f32) here for better accuracy?
	return vreinterpretq_m128_f32(sq);
}
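
// A possible more accurate variant (sketch only, not enabled here): refine the
// reciprocal square root estimate with one Newton-Raphson step and multiply by
// the input, since sqrt(x) == x * (1/sqrt(x)).  Note that x == 0 would need
// special handling in this form (0 * inf produces NaN), which the estimate-only
// code above happens to avoid.
//	float32x4_t x = vreinterpretq_f32_m128(in);
//	float32x4_t r = vrsqrteq_f32(x);
//	r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(x, r), r));	// one refinement step
//	return vreinterpretq_m128_f32(vmulq_f32(x, r));	// x * (1/sqrt(x)) ~= sqrt(x)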

// Computes the approximation of the square root of the scalar single-precision floating point value of in.  https://msdn.microsoft.com/en-us/library/ahfsc22d(v=vs.100).aspx
FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
{
	float32_t value = vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0);
	return vreinterpretq_m128_f32(vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0));
}

// Computes the approximations of the reciprocal square roots of the four single-precision floating point values of in.  https://msdn.microsoft.com/en-us/library/22hfsh53(v=vs.100).aspx
FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
{
	return vreinterpretq_m128_f32(vrsqrteq_f32(vreinterpretq_f32_m128(in)));
}

// Computes the maximums of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/ff5d607a(v=vs.100).aspx
FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_f32(vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}

// Computes the minima of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/wh13kadz(v=vs.100).aspx
FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_f32(vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}

// Computes the maximum of the two lower scalar single-precision floating point values of a and b.  https://msdn.microsoft.com/en-us/library/s6db5esz(v=vs.100).aspx
FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b)
{
	float32_t value = vgetq_lane_f32(vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)), 0);
	return vreinterpretq_m128_f32(vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
}

// Computes the minimum of the two lower scalar single-precision floating point values of a and b.  https://msdn.microsoft.com/en-us/library/0a9y7xaa(v=vs.100).aspx
FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b)
{
	float32_t value = vgetq_lane_f32(vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)), 0);
	return vreinterpretq_m128_f32(vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
}

//added by hasindu
//Computes the pairwise maxima of the 16 unsigned 8-bit integers from a and the 16 unsigned 8-bit integers from b. https://msdn.microsoft.com/en-us/library/st6634za(v=vs.100).aspx
FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u8(vmaxq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
}

//added by hasindu
//Computes the pairwise minima of the 16 unsigned 8-bit integers from a and the 16 unsigned 8-bit integers from b. https://msdn.microsoft.com/ko-kr/library/17k8cf58(v=vs.100).aspx
FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u8(vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
}


// Computes the pairwise minima of the 8 signed 16-bit integers from a and the 8 signed 16-bit integers from b. https://msdn.microsoft.com/en-us/library/vstudio/6te997ew(v=vs.100).aspx
FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s16(vminq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
}

//added by hasindu
//Computes the pairwise maxima of the 8 signed 16-bit integers from a and the 8 signed 16-bit integers from b. https://msdn.microsoft.com/en-us/LIBRary/3x060h7c(v=vs.100).aspx
FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s16(vmaxq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
}


// epi versions of min/max
// Computes the pairwise maximums of the four signed 32-bit integer values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/bb514055(v=vs.100).aspx
FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s32(vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Computes the pairwise minima of the four signed 32-bit integer values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/bb531476(v=vs.100).aspx
FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s32(vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit integers from b and keeps the upper 16 bits of each 32-bit result. https://msdn.microsoft.com/en-us/library/vstudio/59hddw1d(v=vs.100).aspx
FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
{
	/* apoty: issue with large values because of result saturation */
	//int16x8_t ret = vqdmulhq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)); /* =2*a*b */
	//return vreinterpretq_m128i_s16(vshrq_n_s16(ret, 1));
	int16x4_t a3210 = vget_low_s16(vreinterpretq_s16_m128i(a));
	int16x4_t b3210 = vget_low_s16(vreinterpretq_s16_m128i(b));
	int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
	int16x4_t a7654 = vget_high_s16(vreinterpretq_s16_m128i(a));
	int16x4_t b7654 = vget_high_s16(vreinterpretq_s16_m128i(b));
	int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
	uint16x8x2_t r = vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654));
	return vreinterpretq_m128i_u16(r.val[1]);
}
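
// Worked example for _mm_mulhi_epi16 (illustration only): 0x4000 * 0x4000 =
// 0x10000000, so the lane result is the high half 0x1000 and the low half
// 0x0000 is discarded.  This is also why the vqdmulhq_s16 shortcut above is
// left commented out: it saturates the 0x8000 * 0x8000 case instead of
// returning 0x4000.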

// Computes the pairwise additions of the single-precision, floating-point values of a and b.
//https://msdn.microsoft.com/en-us/library/yd9wecaa.aspx
FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b)
{
#if defined(__aarch64__)
	return vreinterpretq_m128_f32(vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); //AArch64
#else
	float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
	float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
	float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
	float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
	return vreinterpretq_m128_f32(vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)));
#endif
}
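
// Result lane layout for _mm_hadd_ps (matching the SSE3 definition), where
// a = { a0, a1, a2, a3 } and b = { b0, b1, b2, b3 } are the lanes in memory order:
//   result = { a0+a1, a2+a3, b0+b1, b2+b3 }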

// ******************************************
// Compare operations
// ******************************************

// Compares for less than https://msdn.microsoft.com/en-us/library/vstudio/f330yhc8(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_u32(vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}

// Compares for greater than. https://msdn.microsoft.com/en-us/library/vstudio/11dy102s(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_u32(vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}

// Compares for greater than or equal. https://msdn.microsoft.com/en-us/library/vstudio/fs813y2t(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_u32(vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}

// Compares for less than or equal. https://msdn.microsoft.com/en-us/library/vstudio/1s75w83z(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_u32(vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}

// Compares for equality. https://msdn.microsoft.com/en-us/library/vstudio/36aectz5(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b)
{
	return vreinterpretq_m128_u32(vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
}


//added by hasindu
//Compares the 16 signed or unsigned 8-bit integers in a and the 16 signed or unsigned 8-bit integers in b for equality. https://msdn.microsoft.com/en-us/library/windows/desktop/bz5xk21a(v=vs.90).aspx
FORCE_INLINE __m128i _mm_cmpeq_epi8 (__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u8(vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
}

//added by hasindu
//Compares the 8 signed or unsigned 16-bit integers in a and the 8 signed or unsigned 16-bit integers in b for equality.
//https://msdn.microsoft.com/en-us/library/2ay060te(v=vs.100).aspx
FORCE_INLINE __m128i _mm_cmpeq_epi16 (__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u16(vceqq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
}

//added by hasindu
//Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers in b for less than. https://msdn.microsoft.com/en-us/library/windows/desktop/9s46csht(v=vs.90).aspx
FORCE_INLINE __m128i _mm_cmplt_epi8 (__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u8(vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
}


//added by hasindu
//Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers in b for greater than. https://msdn.microsoft.com/zh-tw/library/wf45zt2b(v=vs.100).aspx
FORCE_INLINE __m128i _mm_cmpgt_epi8 (__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u8(vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
}

//added by hasindu
//Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers in b for greater than. https://technet.microsoft.com/en-us/library/xd43yfsa(v=vs.100).aspx
FORCE_INLINE __m128i _mm_cmpgt_epi16 (__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u16(vcgtq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
}


// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers in b for less than. https://msdn.microsoft.com/en-us/library/vstudio/4ak0bf5d(v=vs.100).aspx
FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u32(vcltq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers in b for greater than. https://msdn.microsoft.com/en-us/library/vstudio/1s9f2z0y(v=vs.100).aspx
FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_u32(vcgtq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Compares the four 32-bit floats in a and b to check if any values are NaN. Ordered compare between each value returns true for "orderable" and false for "not orderable" (NaN). https://msdn.microsoft.com/en-us/library/vstudio/0h9w00fx(v=vs.100).aspx
// see also:
// http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
// http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics
FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b)
{
	// Note: NEON does not have ordered compare builtin
	// Need to compare a eq a and b eq b to check for NaN
	// Do AND of results to get final
	uint32x4_t ceqaa = vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
	uint32x4_t ceqbb = vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
	return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb));
}
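
// As with the other compare intrinsics, the result of _mm_cmpord_ps is a lane
// mask: a lane is all ones (0xFFFFFFFF) when both inputs in that lane are
// ordered (neither is NaN) and all zeros otherwise, so it can be used directly
// for branchless masking.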

// Compares the lower single-precision floating point scalar values of a and b using a less than operation. : https://msdn.microsoft.com/en-us/library/2kwe606b(v=vs.90).aspx
// Important note!! The documentation on MSDN is incorrect!  If either of the values is a NaN the docs say you will get a one, but in fact, it will return a zero!!
FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b)
{
	uint32x4_t a_not_nan = vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
	uint32x4_t b_not_nan = vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
	uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
	uint32x4_t a_lt_b = vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
	return (vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0) ? 1 : 0;
}

// Compares the lower single-precision floating point scalar values of a and b using a greater than operation. : https://msdn.microsoft.com/en-us/library/b0738e0t(v=vs.100).aspx
FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b)
{
	//return vgetq_lane_u32(vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)), 0);
	uint32x4_t a_not_nan = vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
	uint32x4_t b_not_nan = vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
	uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
	uint32x4_t a_gt_b = vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
	return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0) ? 1 : 0;
}

// Compares the lower single-precision floating point scalar values of a and b using a less than or equal operation. : https://msdn.microsoft.com/en-us/library/1w4t7c57(v=vs.90).aspx
FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b)
{
	//return vgetq_lane_u32(vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)), 0);
	uint32x4_t a_not_nan = vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
	uint32x4_t b_not_nan = vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
	uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
	uint32x4_t a_le_b = vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
	return (vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0) ? 1 : 0;
}

// Compares the lower single-precision floating point scalar values of a and b using a greater than or equal operation. : https://msdn.microsoft.com/en-us/library/8t80des6(v=vs.100).aspx
FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b)
{
	//return vgetq_lane_u32(vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)), 0);
	uint32x4_t a_not_nan = vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
	uint32x4_t b_not_nan = vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
	uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
	uint32x4_t a_ge_b = vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
	return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0) ? 1 : 0;
}

// Compares the lower single-precision floating point scalar values of a and b using an equality operation. : https://msdn.microsoft.com/en-us/library/93yx2h2b(v=vs.100).aspx
FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b)
{
	//return vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)), 0);
	uint32x4_t a_not_nan = vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
	uint32x4_t b_not_nan = vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
	uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
	uint32x4_t a_eq_b = vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
	return (vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0) ? 1 : 0;
}

// Compares the lower single-precision floating point scalar values of a and b using an inequality operation. : https://msdn.microsoft.com/en-us/library/bafh5e0a(v=vs.90).aspx
FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b)
{
	//return !vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)), 0);
	uint32x4_t a_not_nan = vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
	uint32x4_t b_not_nan = vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
	uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
	uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
	return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0) ? 1 : 0;
}

// according to the documentation, these intrinsics behave the same as the non-'u' versions.  We'll just alias them here.
#define _mm_ucomilt_ss      _mm_comilt_ss
#define _mm_ucomile_ss      _mm_comile_ss
#define _mm_ucomigt_ss      _mm_comigt_ss
#define _mm_ucomige_ss      _mm_comige_ss
#define _mm_ucomieq_ss      _mm_comieq_ss
#define _mm_ucomineq_ss     _mm_comineq_ss

// ******************************************
// Conversions
// ******************************************

// Converts the four single-precision, floating-point values of a to signed 32-bit integer values using truncate. https://msdn.microsoft.com/en-us/library/vstudio/1h005y6x(v=vs.100).aspx
FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a)
{
	return vreinterpretq_m128i_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)));
}

// Converts the four signed 32-bit integer values of a to single-precision, floating-point values https://msdn.microsoft.com/en-us/library/vstudio/36bwxcx5(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a)
{
	return vreinterpretq_m128_f32(vcvtq_f32_s32(vreinterpretq_s32_m128i(a)));
}

// Converts the four unsigned 8-bit integers in the lower 32 bits to four unsigned 32-bit integers. https://msdn.microsoft.com/en-us/library/bb531467%28v=vs.100%29.aspx
FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a)
{
	uint8x16_t u8x16 = vreinterpretq_u8_m128i(a);      /* xxxx xxxx xxxx DCBA */
	uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16));   /* 0x0x 0x0x 0D0C 0B0A */
	uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */
	return vreinterpretq_m128i_u32(u32x4);
}

// Converts the four signed 16-bit integers in the lower 64 bits to four signed 32-bit integers. https://msdn.microsoft.com/en-us/library/bb514079%28v=vs.100%29.aspx
FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a)
{
	return vreinterpretq_m128i_s32(vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a))));
}

// Converts the four single-precision, floating-point values of a to signed 32-bit integer values. https://msdn.microsoft.com/en-us/library/vstudio/xdc42k5e(v=vs.100).aspx
// *NOTE*. The default rounding mode on SSE is 'round to even', which ARMv7 does not support!
// It is supported on ARMv8 however.
FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a)
{
#if defined(__aarch64__)
	return vreinterpretq_m128i_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a)));
#else
	uint32x4_t signmask = vdupq_n_u32(0x80000000);
	float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a), vdupq_n_f32(0.5f)); /* +/- 0.5 */
	int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
	int32x4_t r_trunc = vcvtq_s32_f32(vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
	int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
	int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone), vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
	float32x4_t delta = vsubq_f32(vreinterpretq_f32_m128(a), vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
	uint32x4_t is_delta_half = vceqq_f32(delta, half); /* delta == +/- 0.5 */
	return vreinterpretq_m128i_s32(vbslq_s32(is_delta_half, r_even, r_normal));
#endif
}
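
// Round-to-even examples for _mm_cvtps_epi32 (illustration only): 0.5f -> 0,
// 1.5f -> 2, 2.5f -> 2 and -2.5f -> -2, whereas _mm_cvttps_epi32 above simply
// truncates towards zero (1.5f -> 1, 2.5f -> 2, -2.5f -> -2).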

// Moves the least significant 32 bits of a to a 32-bit integer. https://msdn.microsoft.com/en-us/library/5z7a9642%28v=vs.90%29.aspx
FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
{
	return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
}

// Moves 32-bit integer a to the least significant 32 bits of an __m128 object, zero extending the upper bits. https://msdn.microsoft.com/en-us/library/ct3539ha%28v=vs.90%29.aspx
FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
{
	return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0));
}


// Applies a type cast to reinterpret four 32-bit floating point values passed in as a 128-bit parameter as packed 32-bit integers. https://msdn.microsoft.com/en-us/library/bb514099.aspx
FORCE_INLINE __m128i _mm_castps_si128(__m128 a)
{
	return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a));
}

// Applies a type cast to reinterpret four 32-bit integers passed in as a 128-bit parameter as packed 32-bit floating point values. https://msdn.microsoft.com/en-us/library/bb514029.aspx
FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
{
	return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a));
}

// Loads 128-bit value. : https://msdn.microsoft.com/en-us/library/atzzad1h(v=vs.80).aspx
FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
{
	return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *)p));
}

//added by hasindu (verify this for requirement of alignment) -- note: vld1q_s32 imposes no 16-byte alignment requirement, so the same load also works for the unaligned case
// Loads 128-bit value. : https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx
FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p)
{
	return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *)p));
}


// ******************************************
// Miscellaneous Operations
// ******************************************

// Packs the 16 signed 16-bit integers from a and b into 8-bit integers and saturates. https://msdn.microsoft.com/en-us/library/k4y4f7w5%28v=vs.90%29.aspx
FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s8(vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)), vqmovn_s16(vreinterpretq_s16_m128i(b))));
}
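
// Packing example for _mm_packs_epi16 (illustration only): the low 8 result
// bytes are the saturated lanes of a and the high 8 bytes those of b, so a
// lane value of 300 becomes 127 and -300 becomes -128.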

// Packs the 16 signed 16-bit integers from a and b into 8-bit unsigned integers and saturates. https://msdn.microsoft.com/en-us/library/07ad1wx4(v=vs.100).aspx
FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
{
	return vreinterpretq_m128i_u8(vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)), vqmovun_s16(vreinterpretq_s16_m128i(b))));
}

// Packs the 8 signed 32-bit integers from a and b into signed 16-bit integers and saturates. https://msdn.microsoft.com/en-us/library/393t56f9%28v=vs.90%29.aspx
FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
{
	return vreinterpretq_m128i_s16(vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)), vqmovn_s32(vreinterpretq_s32_m128i(b))));
}

// Interleaves the lower 8 signed or unsigned 8-bit integers in a with the lower 8 signed or unsigned 8-bit integers in b.  https://msdn.microsoft.com/en-us/library/xf7k860c%28v=vs.90%29.aspx
FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
{
	int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a)));
	int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b)));
	int8x8x2_t result = vzip_s8(a1, b1);
	return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
}
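
// Interleave layout for _mm_unpacklo_epi8 (illustration only), with
// a = { a0 ... a15 } and b = { b0 ... b15 } in memory order:
//   result = { a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7 }
// The _mm_unpackhi_epi8 variant below does the same for lanes 8..15.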

// Interleaves the lower 4 signed or unsigned 16-bit integers in a with the lower 4 signed or unsigned 16-bit integers in b.  https://msdn.microsoft.com/en-us/library/btxb17bw%28v=vs.90%29.aspx
FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
{
	int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a));
	int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b));
	int16x4x2_t result = vzip_s16(a1, b1);
	return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
}

// Interleaves the lower 2 signed or unsigned 32-bit integers in a with the lower 2 signed or unsigned 32-bit integers in b.  https://msdn.microsoft.com/en-us/library/x8atst9d(v=vs.100).aspx
FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
{
	int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a));
	int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b));
	int32x2x2_t result = vzip_s32(a1, b1);
	return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
}

// Selects and interleaves the lower two single-precision, floating-point values from a and b. https://msdn.microsoft.com/en-us/library/25st103b%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b)
{
	float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a));
	float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b));
	float32x2x2_t result = vzip_f32(a1, b1);
	return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
}

// Selects and interleaves the upper two single-precision, floating-point values from a and b. https://msdn.microsoft.com/en-us/library/skccxx7d%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b)
{
	float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a));
	float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b));
	float32x2x2_t result = vzip_f32(a1, b1);
	return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
}

// Interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper 8 signed or unsigned 8-bit integers in b.  https://msdn.microsoft.com/en-us/library/t5h7783k(v=vs.100).aspx
FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
{
	int8x8_t a1 = vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a)));
	int8x8_t b1 = vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b)));
	int8x8x2_t result = vzip_s8(a1, b1);
	return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
}

// Interleaves the upper 4 signed or unsigned 16-bit integers in a with the upper 4 signed or unsigned 16-bit integers in b.  https://msdn.microsoft.com/en-us/library/03196cz7(v=vs.100).aspx
FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
{
	int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a));
	int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b));
	int16x4x2_t result = vzip_s16(a1, b1);
	return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
}

// Interleaves the upper 2 signed or unsigned 32-bit integers in a with the upper 2 signed or unsigned 32-bit integers in b.  https://msdn.microsoft.com/en-us/library/65sa7cbs(v=vs.100).aspx
FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
{
	int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a));
	int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b));
	int32x2x2_t result = vzip_s32(a1, b1);
	return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
}

// Extracts the selected signed or unsigned 16-bit integer from a and zero extends.  https://msdn.microsoft.com/en-us/library/6dceta0c(v=vs.100).aspx
//FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm)
#define _mm_extract_epi16(a, imm) \
({ \
	(vgetq_lane_s16(vreinterpretq_s16_m128i(a), (imm)) & 0x0000ffffUL); \
})
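
// Illustrative use of _mm_extract_epi16 (not a definitive example): imm must be
// a compile-time constant in the range 0..7.
//	int lane3 = _mm_extract_epi16(v, 3);	// zero-extended 16-bit lane 3 of v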

// Inserts the least significant 16 bits of b into the selected 16-bit integer of a. https://msdn.microsoft.com/en-us/library/kaze8hz1%28v=vs.100%29.aspx
//FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, const int b, __constrange(0,8) int imm)
#define _mm_insert_epi16(a, b, imm) \
({ \
	vreinterpretq_m128i_s16(vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm))); \
})

// ******************************************
// Streaming Extensions
// ******************************************

// Guarantees that every preceding store is globally visible before any subsequent store.  https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx
FORCE_INLINE void _mm_sfence(void)
{
	// __sync_synchronize() is a full memory barrier, which is stronger than the store ordering _mm_sfence requires, but it is portable and correct.
	__sync_synchronize();
}

// Stores the data in a to the address p without polluting the caches.  If the cache line containing address p is already in the cache, the cache will be updated. Address p must be 16-byte aligned.  https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx
FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a)
{
	*p = a;
}
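
// Note: standard NEON intrinsics expose no non-temporal store hint, so the
// cache-bypass behavior of _mm_stream_si128 is not reproduced here; the
// fallback above is a plain (cache-filling) 128-bit store.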

// Cache line containing p is flushed and invalidated from all caches in the coherency domain. : https://msdn.microsoft.com/en-us/library/ba08y07y(v=vs.100).aspx
FORCE_INLINE void _mm_clflush(void const *p)
{
	// no corollary for NEON: there is no portable ARM intrinsic for flushing a cache line from user code, so this is intentionally a no-op
}

#if defined(__GNUC__) || defined(__clang__)
#	pragma pop_macro("ALIGN_STRUCT")
#	pragma pop_macro("FORCE_INLINE")
#endif

#endif