/**
 * This file has no copyright assigned and is placed in the Public Domain.
 * This file is part of the w64 mingw-runtime package.
 * No warranty is given; refer to the file DISCLAIMER within this package.
 */
#ifndef _FVEC_H_INCLUDED
#define _FVEC_H_INCLUDED

#ifndef RC_INVOKED
#ifndef __cplusplus
#error ERROR: This file is only supported in C++ compilations!
#endif

#include <xmmintrin.h>
#include <assert.h>
#include <ivec.h>
#include <corecrt.h>

#if defined(_ENABLE_VEC_DEBUG)
#include <iostream>
#endif

#pragma pack(push,_CRT_PACKING)
#pragma pack(push,16)

#define EXPLICIT explicit

/* F32vec4: four packed single-precision floats held in one __m128 register. */
class F32vec4 {
protected:
  __m128 vec;
public:
  F32vec4() {}
  F32vec4(__m128 m) { vec = m; }
  F32vec4(float f3, float f2, float f1, float f0) { vec = _mm_set_ps(f3, f2, f1, f0); }
  EXPLICIT F32vec4(float f) { vec = _mm_set_ps1(f); }
  EXPLICIT F32vec4(double d) { vec = _mm_set_ps1((float) d); }
  F32vec4& operator =(float f) { vec = _mm_set_ps1(f); return *this; }
  F32vec4& operator =(double d) { vec = _mm_set_ps1((float) d); return *this; }
  operator __m128() const { return vec; }
  friend F32vec4 operator &(const F32vec4 &a, const F32vec4 &b) { return _mm_and_ps(a, b); }
  friend F32vec4 operator |(const F32vec4 &a, const F32vec4 &b) { return _mm_or_ps(a, b); }
  friend F32vec4 operator ^(const F32vec4 &a, const F32vec4 &b) { return _mm_xor_ps(a, b); }
  friend F32vec4 operator +(const F32vec4 &a, const F32vec4 &b) { return _mm_add_ps(a, b); }
  friend F32vec4 operator -(const F32vec4 &a, const F32vec4 &b) { return _mm_sub_ps(a, b); }
  friend F32vec4 operator *(const F32vec4 &a, const F32vec4 &b) { return _mm_mul_ps(a, b); }
  friend F32vec4 operator /(const F32vec4 &a, const F32vec4 &b) { return _mm_div_ps(a, b); }
  F32vec4& operator =(const F32vec4 &a) { vec = a.vec; return *this; }
  F32vec4& operator =(const __m128 &avec) { vec = avec; return *this; }
  F32vec4& operator +=(F32vec4 &a) { return *this = _mm_add_ps(vec, a); }
  F32vec4& operator -=(F32vec4 &a) { return *this = _mm_sub_ps(vec, a); }
  F32vec4& operator *=(F32vec4 &a) { return *this = _mm_mul_ps(vec, a); }
  F32vec4& operator /=(F32vec4 &a) { return *this = _mm_div_ps(vec, a); }
  F32vec4& operator &=(F32vec4 &a) { return *this = _mm_and_ps(vec, a); }
  F32vec4& operator |=(F32vec4 &a) { return *this = _mm_or_ps(vec, a); }
  F32vec4& operator ^=(F32vec4 &a) { return *this = _mm_xor_ps(vec, a); }
  /* Horizontal add: returns the sum of all four elements as a scalar. */
  friend float add_horizontal(F32vec4 &a) {
    F32vec4 ftemp = _mm_add_ss(a, _mm_add_ss(_mm_shuffle_ps(a, a, 1), _mm_add_ss(_mm_shuffle_ps(a, a, 2), _mm_shuffle_ps(a, a, 3))));
    return ftemp[0];
  }
  friend F32vec4 sqrt(const F32vec4 &a) { return _mm_sqrt_ps(a); }
  friend F32vec4 rcp(const F32vec4 &a) { return _mm_rcp_ps(a); }
  friend F32vec4 rsqrt(const F32vec4 &a) { return _mm_rsqrt_ps(a); }
  /* Approximate reciprocal refined with one Newton-Raphson iteration. */
  friend F32vec4 rcp_nr(const F32vec4 &a) {
    F32vec4 Ra0 = _mm_rcp_ps(a);
    return _mm_sub_ps(_mm_add_ps(Ra0, Ra0), _mm_mul_ps(_mm_mul_ps(Ra0, a), Ra0));
  }
  /* Approximate reciprocal square root refined with one Newton-Raphson iteration. */
  friend F32vec4 rsqrt_nr(const F32vec4 &a) {
    static const F32vec4 fvecf0pt5(0.5f);
    static const F32vec4 fvecf3pt0(3.0f);
    F32vec4 Ra0 = _mm_rsqrt_ps(a);
    return (fvecf0pt5 * Ra0) * (fvecf3pt0 - (a * Ra0) * Ra0);
  }
#define Fvec32s4_COMP(op) friend F32vec4 cmp##op (const F32vec4 &a, const F32vec4 &b) { return _mm_cmp##op##_ps(a, b); }
  Fvec32s4_COMP(eq)
  Fvec32s4_COMP(lt)
  Fvec32s4_COMP(le)
  Fvec32s4_COMP(gt)
  Fvec32s4_COMP(ge)
  Fvec32s4_COMP(neq)
  Fvec32s4_COMP(nlt)
  Fvec32s4_COMP(nle)
  Fvec32s4_COMP(ngt)
  Fvec32s4_COMP(nge)
#undef Fvec32s4_COMP

  friend F32vec4 simd_min(const F32vec4 &a, const F32vec4 &b) { return _mm_min_ps(a, b); }
  friend F32vec4 simd_max(const F32vec4 &a, const F32vec4 &b) { return _mm_max_ps(a, b); }

#if defined(_ENABLE_VEC_DEBUG)
  friend std::ostream & operator<<(std::ostream &os, const F32vec4 &a) {
    float *fp = (float*)&a;
    os << "[3]:" << *(fp+3)
       << " [2]:" << *(fp+2)
       << " [1]:" << *(fp+1)
       << " [0]:" << *fp;
    return os;
  }
#endif
  const float& operator[](int i) const {
    assert((0 <= i) && (i <= 3));
    float *fp = (float*)&vec;
    return *(fp+i);
  }
  float& operator[](int i) {
    assert((0 <= i) && (i <= 3));
    float *fp = (float*)&vec;
    return *(fp+i);
  }
};

inline F32vec4 unpack_low(const F32vec4 &a, const F32vec4 &b) { return _mm_unpacklo_ps(a, b); }
inline F32vec4 unpack_high(const F32vec4 &a, const F32vec4 &b) { return _mm_unpackhi_ps(a, b); }
inline int move_mask(const F32vec4 &a) { return _mm_movemask_ps(a); }
inline void loadu(F32vec4 &a, float *p) { a = _mm_loadu_ps(p); }
inline void storeu(float *p, const F32vec4 &a) { _mm_storeu_ps(p, a); }
inline void store_nta(float *p, F32vec4 &a) { _mm_stream_ps(p, a); }

/* select_xx(a,b,c,d): per element, returns c where (a xx b) is true, d elsewhere. */
#define Fvec32s4_SELECT(op) inline F32vec4 select_##op (const F32vec4 &a, const F32vec4 &b, const F32vec4 &c, const F32vec4 &d) { F32vec4 mask = _mm_cmp##op##_ps(a, b); return ((mask & c) | F32vec4((_mm_andnot_ps(mask, d)))); }
Fvec32s4_SELECT(eq)
Fvec32s4_SELECT(lt)
Fvec32s4_SELECT(le)
Fvec32s4_SELECT(gt)
Fvec32s4_SELECT(ge)
Fvec32s4_SELECT(neq)
Fvec32s4_SELECT(nlt)
Fvec32s4_SELECT(nle)
Fvec32s4_SELECT(ngt)
Fvec32s4_SELECT(nge)
#undef Fvec32s4_SELECT

inline Is16vec4 simd_max(const Is16vec4 &a, const Is16vec4 &b) { return _m_pmaxsw(a, b); }
inline Is16vec4 simd_min(const Is16vec4 &a, const Is16vec4 &b) { return _m_pminsw(a, b); }
inline Iu8vec8 simd_max(const Iu8vec8 &a, const Iu8vec8 &b) { return _m_pmaxub(a, b); }
inline Iu8vec8 simd_min(const Iu8vec8 &a, const Iu8vec8 &b) { return _m_pminub(a, b); }
inline Iu16vec4 simd_avg(const Iu16vec4 &a, const Iu16vec4 &b) { return _m_pavgw(a, b); }
inline Iu8vec8 simd_avg(const Iu8vec8 &a, const Iu8vec8 &b) { return _m_pavgb(a, b); }
inline int move_mask(const I8vec8 &a) { return _m_pmovmskb(a); }
inline Iu16vec4 mul_high(const Iu16vec4 &a, const Iu16vec4 &b) { return _m_pmulhuw(a, b); }
inline void mask_move(const I8vec8 &a, const I8vec8 &b, char *addr) { _m_maskmovq(a, b, addr); }
inline void store_nta(__m64 *p, M64 &a) { _mm_stream_pi(p, a); }

inline int F32vec4ToInt(const F32vec4 &a) { return _mm_cvtt_ss2si(a); }

inline Is32vec2 F32vec4ToIs32vec2(const F32vec4 &a) {
  __m64 result;
  result = _mm_cvtt_ps2pi(a);
  return Is32vec2(result);
}

inline F32vec4 IntToF32vec4(const F32vec4 &a, int i) {
  __m128 result;
  result = _mm_cvt_si2ss(a, i);
  return F32vec4(result);
}

inline F32vec4 Is32vec2ToF32vec4(const F32vec4 &a, const Is32vec2 &b) {
  __m128 result;
  result = _mm_cvt_pi2ps(a, b);
  return F32vec4(result);
}

/* F32vec1: a single float carried in the low element of an __m128 register. */
class F32vec1 {
protected:
  __m128 vec;
public:
  F32vec1() {}
  F32vec1(int i) { vec = _mm_cvt_si2ss(vec, i); } /* note: the upper three elements are left undefined */
  EXPLICIT F32vec1(float f) { vec = _mm_set_ss(f); }
  EXPLICIT F32vec1(double d) { vec = _mm_set_ss((float) d); }
  F32vec1(__m128 m) { vec = m; }
  operator __m128() const { return vec; }
  friend F32vec1 operator &(const F32vec1 &a, const F32vec1 &b) { return _mm_and_ps(a, b); }
  friend F32vec1 operator |(const F32vec1 &a, const F32vec1 &b) { return _mm_or_ps(a, b); }
  friend F32vec1 operator ^(const F32vec1 &a, const F32vec1 &b) { return _mm_xor_ps(a, b); }
  friend F32vec1 operator +(const F32vec1 &a, const F32vec1 &b) { return _mm_add_ss(a, b); }
  friend F32vec1 operator -(const F32vec1 &a, const F32vec1 &b) { return _mm_sub_ss(a, b); }
  friend F32vec1 operator *(const F32vec1 &a, const F32vec1 &b) { return _mm_mul_ss(a, b); }
  friend F32vec1 operator /(const F32vec1 &a, const F32vec1 &b) { return _mm_div_ss(a, b); }
  F32vec1& operator +=(F32vec1 &a) { return *this = _mm_add_ss(vec, a); }
  F32vec1& operator -=(F32vec1 &a) { return *this = _mm_sub_ss(vec, a); }
  F32vec1& operator *=(F32vec1 &a) { return *this = _mm_mul_ss(vec, a); }
  F32vec1& operator /=(F32vec1 &a) { return *this = _mm_div_ss(vec, a); }
  F32vec1& operator &=(F32vec1 &a) { return *this = _mm_and_ps(vec, a); }
  F32vec1& operator |=(F32vec1 &a) { return *this = _mm_or_ps(vec, a); }
  F32vec1& operator ^=(F32vec1 &a) { return *this = _mm_xor_ps(vec, a); }
  friend F32vec1 sqrt(const F32vec1 &a) { return _mm_sqrt_ss(a); }
  friend F32vec1 rcp(const F32vec1 &a) { return _mm_rcp_ss(a); }
  friend F32vec1 rsqrt(const F32vec1 &a) { return _mm_rsqrt_ss(a); }
  friend F32vec1 rcp_nr(const F32vec1 &a) {
    F32vec1 Ra0 = _mm_rcp_ss(a);
    return _mm_sub_ss(_mm_add_ss(Ra0, Ra0), _mm_mul_ss(_mm_mul_ss(Ra0, a), Ra0));
  }
  friend F32vec1 rsqrt_nr(const F32vec1 &a) {
    static const F32vec1 fvecf0pt5(0.5f);
    static const F32vec1 fvecf3pt0(3.0f);
    F32vec1 Ra0 = _mm_rsqrt_ss(a);
    return (fvecf0pt5 * Ra0) * (fvecf3pt0 - (a * Ra0) * Ra0);
  }
#define Fvec32s1_COMP(op) friend F32vec1 cmp##op (const F32vec1 &a, const F32vec1 &b) { return _mm_cmp##op##_ss(a, b); }
  Fvec32s1_COMP(eq)
  Fvec32s1_COMP(lt)
  Fvec32s1_COMP(le)
  Fvec32s1_COMP(gt)
  Fvec32s1_COMP(ge)
  Fvec32s1_COMP(neq)
  Fvec32s1_COMP(nlt)
  Fvec32s1_COMP(nle)
  Fvec32s1_COMP(ngt)
  Fvec32s1_COMP(nge)
#undef Fvec32s1_COMP

  friend F32vec1 simd_min(const F32vec1 &a, const F32vec1 &b) { return _mm_min_ss(a, b); }
  friend F32vec1 simd_max(const F32vec1 &a, const F32vec1 &b) { return _mm_max_ss(a, b); }

#if defined(_ENABLE_VEC_DEBUG)
  friend std::ostream & operator<<(std::ostream &os, const F32vec1 &a) {
    float *fp = (float*)&a;
    os << "float:" << *fp;
    return os;
  }
#endif
};

#define Fvec32s1_SELECT(op) inline F32vec1 select_##op (const F32vec1 &a, const F32vec1 &b, const F32vec1 &c, const F32vec1 &d) { F32vec1 mask = _mm_cmp##op##_ss(a, b); return ((mask & c) | F32vec1((_mm_andnot_ps(mask, d)))); }
Fvec32s1_SELECT(eq)
Fvec32s1_SELECT(lt)
Fvec32s1_SELECT(le)
Fvec32s1_SELECT(gt)
Fvec32s1_SELECT(ge)
Fvec32s1_SELECT(neq)
Fvec32s1_SELECT(nlt)
Fvec32s1_SELECT(nle)
Fvec32s1_SELECT(ngt)
Fvec32s1_SELECT(nge)
#undef Fvec32s1_SELECT

inline int F32vec1ToInt(const F32vec1 &a)
{
  return _mm_cvtt_ss2si(a);
}

#pragma pack(pop)
#pragma pack(pop)
#endif
#endif
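/*
 * Illustrative usage sketch, not part of this header: dot4() below is a
 * hypothetical helper shown only to put the F32vec4 operators and free
 * functions declared above in context. It assumes an SSE-capable target
 * and four-element input arrays of any alignment.
 *
 *   float dot4(float *x, float *y) {
 *     F32vec4 a, b;
 *     loadu(a, x);               // unaligned load of four floats
 *     loadu(b, y);
 *     a *= b;                    // element-wise products
 *     return add_horizontal(a);  // sum of the four products
 *   }
 */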