/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#ifndef VPX_PORTS_X86_H_
#define VPX_PORTS_X86_H_
#include <stdlib.h>

#if defined(_MSC_VER)
#include <intrin.h>  /* For __cpuidex, __rdtsc */
#endif

#include "vpx_config.h"
#include "vpx/vpx_integer.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef enum {
  VPX_CPU_UNKNOWN = -1,
  VPX_CPU_AMD,
  VPX_CPU_AMD_OLD,
  VPX_CPU_CENTAUR,
  VPX_CPU_CYRIX,
  VPX_CPU_INTEL,
  VPX_CPU_NEXGEN,
  VPX_CPU_NSC,
  VPX_CPU_RISE,
  VPX_CPU_SIS,
  VPX_CPU_TRANSMETA,
  VPX_CPU_TRANSMETA_OLD,
  VPX_CPU_UMC,
  VPX_CPU_VIA,

  VPX_CPU_LAST
} vpx_cpu_t;
#if (defined(__GNUC__) && __GNUC__) || defined(__ANDROID__)
#if ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx)\
  __asm__ __volatile__ (\
    "cpuid \n\t" \
    : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
    : "a" (func), "c" (func2));
#else
/* On 32-bit GCC, %ebx may be reserved as the PIC register, so it cannot be
 * named in the constraint list; save it in %edi around the cpuid instead. */
#define cpuid(func, func2, ax, bx, cx, dx)\
  __asm__ __volatile__ (\
    "mov %%ebx, %%edi \n\t" \
    "cpuid \n\t" \
    "xchg %%edi, %%ebx \n\t" \
    : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
    : "a" (func), "c" (func2));
#endif
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__ */
#if ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx)\
  asm volatile (\
    "xchg %rsi, %rbx \n\t" \
    "cpuid \n\t" \
    "movl %ebx, %edi \n\t" \
    "xchg %rsi, %rbx \n\t" \
    : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
    : "a" (func), "c" (func2));
#else
#define cpuid(func, func2, ax, bx, cx, dx)\
  asm volatile (\
    "pushl %ebx \n\t" \
    "cpuid \n\t" \
    "movl %ebx, %edi \n\t" \
    "popl %ebx \n\t" \
    : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
    : "a" (func), "c" (func2));
#endif
#else /* end __SUNPRO__ */
#if ARCH_X86_64
#if defined(_MSC_VER) && _MSC_VER > 1500
#define cpuid(func, func2, a, b, c, d) do {\
    int regs[4];\
    __cpuidex(regs, func, func2); \
    a = regs[0]; b = regs[1]; c = regs[2]; d = regs[3];\
  } while(0)
#else
/* __cpuidex is unavailable on this toolchain; fall back to __cpuid, which
 * takes no sub-leaf argument, so func2 is ignored here. */
#define cpuid(func, func2, a, b, c, d) do {\
    int regs[4];\
    __cpuid(regs, func); \
    a = regs[0]; b = regs[1]; c = regs[2]; d = regs[3];\
  } while (0)
#endif
#else
#define cpuid(func, func2, a, b, c, d)\
  __asm mov eax, func\
  __asm mov ecx, func2\
  __asm cpuid\
  __asm mov a, eax\
  __asm mov b, ebx\
  __asm mov c, ecx\
  __asm mov d, edx
#endif
#endif /* end others */
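
/* Example (illustrative only, not part of the original header): querying the
 * CPU vendor string with the cpuid() macro above. Leaf 0 returns the highest
 * supported leaf in eax and the 12-byte vendor string in ebx:edx:ecx.
 *
 *   unsigned int max_leaf, b, c, d;
 *   char vendor[13];
 *   cpuid(0, 0, max_leaf, b, c, d);
 *   memcpy(vendor + 0, &b, 4);
 *   memcpy(vendor + 4, &d, 4);
 *   memcpy(vendor + 8, &c, 4);
 *   vendor[12] = '\0';  // e.g. "GenuineIntel" or "AuthenticAMD"
 */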

// NaCl has no support for xgetbv or the raw opcode.
#if !defined(__native_client__) && (defined(__i386__) || defined(__x86_64__))
static INLINE uint64_t xgetbv(void) {
  const uint32_t ecx = 0;
  uint32_t eax, edx;
  // Use the raw opcode for xgetbv for compatibility with older toolchains.
  __asm__ volatile (
    ".byte 0x0f, 0x01, 0xd0\n"
    : "=a"(eax), "=d"(edx) : "c" (ecx));
  return ((uint64_t)edx << 32) | eax;
}
#elif (defined(_M_X64) || defined(_M_IX86)) && \
      defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219  // >= VS2010 SP1
#include <immintrin.h>
#define xgetbv() _xgetbv(0)
#elif defined(_MSC_VER) && defined(_M_IX86)
static INLINE uint64_t xgetbv(void) {
  uint32_t eax_, edx_;
  __asm {
    xor ecx, ecx  // ecx = 0
    // Use the raw opcode for xgetbv for compatibility with older toolchains.
    __asm _emit 0x0f __asm _emit 0x01 __asm _emit 0xd0
    mov eax_, eax
    mov edx_, edx
  }
  return ((uint64_t)edx_ << 32) | eax_;
}
#else
#define xgetbv() 0U  // no AVX for older x64 or unrecognized toolchains.
#endif
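
/* Note (added for clarity): xgetbv(0) reads XCR0, which reports which
 * register states the OS has enabled via xsave. Bit 1 covers the SSE (XMM)
 * state and bit 2 the AVX (YMM) state, so a mask of 0x6 means the OS will
 * preserve the full 256-bit registers across context switches:
 *
 *   if ((xgetbv() & 0x6) == 0x6) {
 *     // Safe to execute AVX instructions.
 *   }
 */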

#if defined(_MSC_VER) && _MSC_VER >= 1700
#include <windows.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_FAMILY_APP)
/* Windows Store app builds have no process environment; stub out getenv. */
#define getenv(x) NULL
#endif
#endif

#define HAS_MMX     0x01
#define HAS_SSE     0x02
#define HAS_SSE2    0x04
#define HAS_SSE3    0x08
#define HAS_SSSE3   0x10
#define HAS_SSE4_1  0x20
#define HAS_AVX     0x40
#define HAS_AVX2    0x80
#ifndef BIT
#define BIT(n) (1 << (n))
#endif
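
/* Example (illustrative only): the HAS_* flags combine as a bitmask, and
 * x86_simd_caps() below can be overridden or restricted from the
 * environment. For instance, to limit a build to SSE2 and below when
 * testing, one might run:
 *
 *   VPX_SIMD_CAPS_MASK=0x07 ./vpxdec ...
 *
 * (0x07 == HAS_MMX | HAS_SSE | HAS_SSE2)
 */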

static INLINE int
x86_simd_caps(void) {
  unsigned int flags = 0;
  unsigned int mask = ~0;
  unsigned int max_cpuid_val, reg_eax, reg_ebx, reg_ecx, reg_edx;
  char *env;
  (void)reg_ebx;

  /* See if the CPU capabilities are being overridden by the environment */
  env = getenv("VPX_SIMD_CAPS");

  if (env && *env)
    return (int)strtol(env, NULL, 0);

  env = getenv("VPX_SIMD_CAPS_MASK");

  if (env && *env)
    mask = (unsigned int)strtoul(env, NULL, 0);

  /* Ensure that the CPUID instruction supports extended features */
  cpuid(0, 0, max_cpuid_val, reg_ebx, reg_ecx, reg_edx);

  if (max_cpuid_val < 1)
    return 0;

  /* Get the standard feature flags */
  cpuid(1, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);

  if (reg_edx & BIT(23)) flags |= HAS_MMX;

  if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */

  if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */

  if (reg_ecx & BIT(0)) flags |= HAS_SSE3;

  if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;

  if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;

  // bits 27 (OSXSAVE) & 28 (256-bit AVX)
  if ((reg_ecx & (BIT(27) | BIT(28))) == (BIT(27) | BIT(28))) {
    // Require the OS to preserve XMM and YMM state (XCR0 bits 1 and 2).
    if ((xgetbv() & 0x6) == 0x6) {
      flags |= HAS_AVX;

      if (max_cpuid_val >= 7) {
        /* Get the leaf 7 feature flags. Needed to check for AVX2 support */
        cpuid(7, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);

        if (reg_ebx & BIT(5)) flags |= HAS_AVX2;
      }
    }
  }

  return flags & mask;
}
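
/* Example (illustrative only): run-time dispatch on the returned flags.
 *
 *   const int caps = x86_simd_caps();
 *   if (caps & HAS_AVX2)
 *     ...call the AVX2 variant...
 *   else if (caps & HAS_SSE2)
 *     ...call the SSE2 variant...
 *   else
 *     ...fall back to the C implementation...
 */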

// Note:
// The 32-bit CPU cycle counter is lightweight enough for most function-level
// performance measurement. For long-running functions (CPU time greater than
// a couple of seconds), use the 64-bit counter so the count does not wrap.
// 32-bit CPU cycle counter
static INLINE unsigned int
x86_readtsc(void) {
#if defined(__GNUC__) && __GNUC__
  unsigned int tsc;
  // rdtsc writes edx:eax; only eax is kept, so edx must be clobbered.
  __asm__ __volatile__("rdtsc\n\t" : "=a"(tsc) : : "%edx");
  return tsc;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  unsigned int tsc;
  asm volatile("rdtsc\n\t" : "=a"(tsc) : : "%edx");
  return tsc;
#else
#if ARCH_X86_64
  return (unsigned int)__rdtsc();
#else
  __asm rdtsc;
#endif
#endif
}
// 64-bit CPU cycle counter
static INLINE uint64_t
x86_readtsc64(void) {
#if defined(__GNUC__) && __GNUC__
  uint32_t hi, lo;
  __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
  return ((uint64_t)hi << 32) | lo;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  uint32_t hi, lo;
  asm volatile("rdtsc\n\t" : "=a"(lo), "=d"(hi));
  return ((uint64_t)hi << 32) | lo;
#else
#if ARCH_X86_64
  return (uint64_t)__rdtsc();
#else
  // On 32-bit MSVC a 64-bit return value travels in edx:eax, which is
  // exactly where rdtsc leaves the counter.
  __asm rdtsc;
#endif
#endif
}
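
/* Example (illustrative only): timing a code section with the cycle counters
 * above. rdtsc counts cycles rather than wall time and is not a serializing
 * instruction, so treat the result as a rough measurement.
 *
 *   const uint64_t start = x86_readtsc64();
 *   ...code under test...
 *   const uint64_t cycles = x86_readtsc64() - start;
 */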

#if defined(__GNUC__) && __GNUC__
#define x86_pause_hint()\
  __asm__ __volatile__ ("pause \n\t")
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define x86_pause_hint()\
  asm volatile ("pause \n\t")
#else
#if ARCH_X86_64
#define x86_pause_hint()\
  _mm_pause()
#else
#define x86_pause_hint()\
  __asm pause
#endif
#endif
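
/* Example (illustrative only): a typical spin-wait loop. The pause hint
 * tells the CPU the thread is busy-waiting, which reduces power use and the
 * pipeline-flush penalty when the wait finally ends.
 *
 *   while (!flag)
 *     x86_pause_hint();
 */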

#if defined(__GNUC__) && __GNUC__
static void
x87_set_control_word(unsigned short mode) {
  __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
}
static unsigned short
x87_get_control_word(void) {
  unsigned short mode;
  __asm__ __volatile__("fstcw %0\n\t" : "=m"(*&mode) :);
  return mode;
}
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
static void
x87_set_control_word(unsigned short mode) {
  asm volatile("fldcw %0" : : "m"(*&mode));
}
static unsigned short
x87_get_control_word(void) {
  unsigned short mode;
  asm volatile("fstcw %0\n\t" : "=m"(*&mode) :);
  return mode;
}
#elif ARCH_X86_64
/* No fldcw intrinsics on Windows x64, punt to external asm */
extern void vpx_winx64_fldcw(unsigned short mode);
extern unsigned short vpx_winx64_fstcw(void);
#define x87_set_control_word vpx_winx64_fldcw
#define x87_get_control_word vpx_winx64_fstcw
#else
static void
x87_set_control_word(unsigned short mode) {
  __asm { fldcw mode }
}
static unsigned short
x87_get_control_word(void) {
  unsigned short mode;
  __asm { fstcw mode }
  return mode;
}
#endif

static INLINE unsigned int
x87_set_double_precision(void) {
  unsigned int mode = x87_get_control_word();
  // Set the x87 precision control field (bits 8-9) to 10b (double precision,
  // 53-bit significand) and leave the other control bits unchanged.
  x87_set_control_word((mode & ~0x300) | 0x200);
  return mode;
}
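
/* Example (illustrative only): the usual save/restore pattern around code
 * that requires 53-bit x87 precision:
 *
 *   const unsigned int saved = x87_set_double_precision();
 *   ...precision-sensitive code...
 *   x87_set_control_word(saved);
 */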


extern void vpx_reset_mmx_state(void);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // VPX_PORTS_X86_H_