1 /* Libvisual - The audio visualisation framework.
2  *
3  * Copyright (C) 2004, 2005 Dennis Smit <ds@nerds-incorporated.org>
4  *
5  * Authors: Dennis Smit <ds@nerds-incorporated.org>
6  *	    Chong Kai Xiong <descender@phreaker.net>
7  *	    Eric Anholt <anholt@FreeBSD.org>
8  *
9  * Extra Credits: MPlayer cpudetect hackers.
10  *
11  * $Id:
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU Lesser General Public License as
15  * published by the Free Software Foundation; either version 2.1
16  * of the License, or (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26  */
27 
28 /* FIXME: clean this entire file up */
29 
30 #include "lvconfig.h"
31 
32 #if defined(VISUAL_ARCH_POWERPC)
33 #if defined(VISUAL_OS_DARWIN)
34 #include <sys/sysctl.h>
35 #else
36 #include <signal.h>
37 #include <setjmp.h>
38 #endif
39 #endif
40 
41 #if defined(VISUAL_OS_NETBSD) || defined(VISUAL_OS_OPENBSD)
42 #include <sys/param.h>
43 #include <sys/sysctl.h>
44 #include <machine/cpu.h>
45 #endif
46 
47 #if defined(VISUAL_OS_FREEBSD)
48 #include <sys/types.h>
49 #include <sys/sysctl.h>
50 #endif
51 
52 #if defined(VISUAL_OS_LINUX)
53 #include <signal.h>
54 #endif
55 
56 #if defined(VISUAL_OS_WIN32)
57 #include <windows.h>
58 #endif
59 
60 #include <stdio.h>
61 #include <stdlib.h>
62 #include <unistd.h>
63 #include <string.h>
64 
65 #include "lv_log.h"
66 #include "lv_cpu.h"
67 
68 static VisCPU _lv_cpu_caps;
69 static int _lv_cpu_initialized = FALSE;
70 
71 static int has_cpuid (void);
72 static int cpuid (unsigned int ax, unsigned int *p);
73 
74 /* The sigill handlers */
75 #if defined(VISUAL_ARCH_X86) //x86 (linux katmai handler check thing)
76 #if defined(VISUAL_OS_LINUX) && defined(_POSIX_SOURCE) && defined(X86_FXSR_MAGIC)
/* SIGILL handler installed while probing OS-level SSE support in
 * check_os_katmai_support().  If the kernel cannot handle SSE state, the
 * probe's "xorps" instruction raises SIGILL and we land here.
 * NOTE(review): receiving the sigcontext by value after the signal number
 * is a Linux/x86-specific convention for non-SA_SIGINFO handlers; writing
 * to `sc` modifies the saved context on the signal stack — confirm this
 * still holds on the targeted kernels.
 */
static void sigill_handler_sse( int signal, struct sigcontext sc )
{
	/* Both the "xorps %%xmm0,%%xmm0" and "divps %xmm0,%%xmm1"
	 * instructions are 3 bytes long.  We must increment the instruction
	 * pointer manually to avoid repeated execution of the offending
	 * instruction.
	 *
	 * If the SIGILL is caused by a divide-by-zero when unmasked
	 * exceptions aren't supported, the SIMD FPU status and control
	 * word will be restored at the end of the test, so we don't need
	 * to worry about doing it here.  Besides, we may not be able to...
	 */
	sc.eip += 3;	/* skip the 3-byte SSE instruction that trapped */

	_lv_cpu_caps.hasSSE=0;	/* the OS rejected SSE: mark it unusable */
}
93 
/* SIGFPE handler for the unmasked SIMD divide-by-zero probe in
 * check_os_katmai_support().  Getting here with extended FPU state in the
 * sigcontext means the OS delivers SIMD FPU exceptions correctly; we just
 * clean up the MXCSR bits the probe disturbed.
 */
static void sigfpe_handler_sse( int signal, struct sigcontext sc )
{
	if ( sc.fpstate->magic != 0xffff ) {
		/* Our signal context has the extended FPU state, so reset the
		 * divide-by-zero exception mask and clear the divide-by-zero
		 * exception bit.
		 */
		sc.fpstate->mxcsr |= 0x00000200;	/* re-mask divide-by-zero */
		sc.fpstate->mxcsr &= 0xfffffffb;	/* clear the #Z status bit */
	} else {
		/* If we ever get here, we're completely hosed.
		*/
	}
}
108 #endif
109 #endif /* VISUAL_OS_LINUX && _POSIX_SOURCE && X86_FXSR_MAGIC */
110 
111 #if defined(VISUAL_OS_WIN32)
/* Win32 unhandled-exception filter used while probing SSE support.
 * If the test "xorps" raises an illegal-instruction exception, skip the
 * 3-byte instruction, record that SSE is unusable and resume execution;
 * any other exception is passed on to the next filter.
 */
LONG CALLBACK win32_sig_handler_sse(EXCEPTION_POINTERS* ep)
{
	if(ep->ExceptionRecord->ExceptionCode==EXCEPTION_ILLEGAL_INSTRUCTION){
		ep->ContextRecord->Eip +=3;	/* step over the 3-byte xorps */
		_lv_cpu_caps.hasSSE=0;
		return EXCEPTION_CONTINUE_EXECUTION;
	}
	return EXCEPTION_CONTINUE_SEARCH;
}
121 #endif /* VISUAL_OS_WIN32 */
122 
123 
#if defined(VISUAL_ARCH_POWERPC)
#if !defined(VISUAL_OS_DARWIN)
/* Recovery state for the brute-force AltiVec probe below: if the test
 * instruction raises SIGILL we longjmp back out of the handler. */
static sigjmp_buf _lv_powerpc_jmpbuf;
static volatile sig_atomic_t _lv_powerpc_canjump = 0;

static void sigill_handler (int sig);

/* SIGILL handler for the AltiVec probe.  If the signal arrives outside
 * the probe window (_lv_powerpc_canjump unset), re-raise it with the
 * default disposition; otherwise jump back to the sigsetjmp point. */
static void sigill_handler (int sig)
{
	if (!_lv_powerpc_canjump) {
		signal (sig, SIG_DFL);
		raise (sig);
	}

	_lv_powerpc_canjump = 0;
	siglongjmp (_lv_powerpc_jmpbuf, 1);
}
#endif /* !VISUAL_OS_DARWIN */

/* Detects OS support for AltiVec and sets _lv_cpu_caps.hasAltiVec.
 * On Darwin the kernel is asked directly via sysctl(HW_VECTORUNIT);
 * elsewhere an AltiVec instruction is executed under a SIGILL handler.
 *
 * Fixed: this function was previously compiled only when
 * !defined(VISUAL_OS_DARWIN), making its Darwin branch dead code and
 * breaking the Darwin/PowerPC build (visual_cpu_initialize() calls it
 * under a plain VISUAL_ARCH_POWERPC guard).  Now only the signal/setjmp
 * machinery above is excluded on Darwin.
 */
static void check_os_altivec_support( void )
{
#if defined(VISUAL_OS_DARWIN)
	int sels[2] = {CTL_HW, HW_VECTORUNIT};
	int has_vu = 0;
	size_t len = sizeof(has_vu);
	int err;

	err = sysctl (sels, 2, &has_vu, &len, NULL, 0);

	if (err == 0)
		if (has_vu != 0)
			_lv_cpu_caps.hasAltiVec = 1;
#else /* !VISUAL_OS_DARWIN */
	/* no Darwin, do it the brute-force way */
	/* this is borrowed from the libmpeg2 library */
	signal (SIGILL, sigill_handler);
	if (sigsetjmp (_lv_powerpc_jmpbuf, 1)) {
		/* The test instruction trapped: no AltiVec. */
		signal (SIGILL, SIG_DFL);
	} else {
		_lv_powerpc_canjump = 1;

		/* Enable all vector registers via VRSAVE, then execute a
		 * harmless AltiVec instruction (vand). */
		asm volatile
			("mtspr 256, %0\n\t"
			 "vand %%v0, %%v0, %%v0"
			 :
			 : "r" (-1));

		signal (SIGILL, SIG_DFL);
		_lv_cpu_caps.hasAltiVec = 1;
	}
#endif
}
#endif /* VISUAL_ARCH_POWERPC */
175 
176 /* If we're running on a processor that can do SSE, let's see if we
177  * are allowed to or not.  This will catch 2.4.0 or later kernels that
178  * haven't been configured for a Pentium III but are running on one,
179  * and RedHat patched 2.2 kernels that have broken exception handling
180  * support for user space apps that do SSE.
181  */
static void check_os_katmai_support( void )
{
#if defined(VISUAL_ARCH_X86)
#if defined(VISUAL_OS_FREEBSD)
	/* FreeBSD exports kernel SSE support directly via sysctl. */
	int has_sse=0, ret;
	size_t len=sizeof(has_sse);

	ret = sysctlbyname("hw.instruction_sse", &has_sse, &len, NULL, 0);
	if (ret || !has_sse)
		_lv_cpu_caps.hasSSE=0;

#elif defined(VISUAL_OS_NETBSD) || defined(VISUAL_OS_OPENBSD)
	/* NetBSD/OpenBSD expose SSE/SSE2 support as machdep sysctl nodes. */
	int has_sse, has_sse2, ret, mib[2];
	size_t varlen;

	mib[0] = CTL_MACHDEP;
	mib[1] = CPU_SSE;
	varlen = sizeof(has_sse);

	ret = sysctl(mib, 2, &has_sse, &varlen, NULL, 0);
	if (ret < 0 || !has_sse) {
		_lv_cpu_caps.hasSSE=0;
	} else {
		_lv_cpu_caps.hasSSE=1;
	}

	mib[1] = CPU_SSE2;
	varlen = sizeof(has_sse2);
	ret = sysctl(mib, 2, &has_sse2, &varlen, NULL, 0);
	if (ret < 0 || !has_sse2) {
		_lv_cpu_caps.hasSSE2=0;
	} else {
		_lv_cpu_caps.hasSSE2=1;
	}
	/* FIXME(review): this unconditionally overrides the CPU_SSE sysctl
	 * result obtained above and disables SSE on NetBSD/OpenBSD — looks
	 * wrong; verify before removing. */
	_lv_cpu_caps.hasSSE = 0; /* FIXME ?!?!? */

#elif defined(VISUAL_OS_WIN32)
	/* Execute a test SSE instruction under a temporary SEH filter;
	 * win32_sig_handler_sse() clears hasSSE if it traps. */
	LPTOP_LEVEL_EXCEPTION_FILTER exc_fil;
	if ( _lv_cpu_caps.hasSSE ) {
		exc_fil = SetUnhandledExceptionFilter(win32_sig_handler_sse);
		__asm __volatile ("xorps %xmm0, %xmm0");
		SetUnhandledExceptionFilter(exc_fil);
	}
#elif defined(VISUAL_OS_LINUX)
	struct sigaction saved_sigill;
	struct sigaction saved_sigfpe;

	/* Save the original signal handlers.
	*/
	sigaction( SIGILL, NULL, &saved_sigill );
	sigaction( SIGFPE, NULL, &saved_sigfpe );

	/* NOTE(review): sigill_handler_sse/sigfpe_handler_sse are only
	 * compiled when _POSIX_SOURCE and X86_FXSR_MAGIC are also defined
	 * (see their guard above); under other Linux configurations this
	 * branch would not link — verify the build configuration. */
	signal( SIGILL, (void (*)(int))sigill_handler_sse );
	signal( SIGFPE, (void (*)(int))sigfpe_handler_sse );

	/* Emulate test for OSFXSR in CR4.  The OS will set this bit if it
	 * supports the extended FPU save and restore required for SSE.  If
	 * we execute an SSE instruction on a PIII and get a SIGILL, the OS
	 * doesn't support Streaming SIMD Exceptions, even if the processor
	 * does.
	 */
	if ( _lv_cpu_caps.hasSSE ) {
		__asm __volatile ("xorps %xmm1, %xmm0");
	}

	/* Emulate test for OSXMMEXCPT in CR4.  The OS will set this bit if
	 * it supports unmasked SIMD FPU exceptions.  If we unmask the
	 * exceptions, do a SIMD divide-by-zero and get a SIGILL, the OS
	 * doesn't support unmasked SIMD FPU exceptions.  If we get a SIGFPE
	 * as expected, we're okay but we need to clean up after it.
	 *
	 * Are we being too stringent in our requirement that the OS support
	 * unmasked exceptions?  Certain RedHat 2.2 kernels enable SSE by
	 * setting CR4.OSFXSR but don't support unmasked exceptions.  Win98
	 * doesn't even support them.  We at least know the user-space SSE
	 * support is good in kernels that do support unmasked exceptions,
	 * and therefore to be safe I'm going to leave this test in here.
	 */
	if ( _lv_cpu_caps.hasSSE ) {
		/* Exception test intentionally disabled; see comment above. */
		//      test_os_katmai_exception_support();
	}

	/* Restore the original signal handlers.
	*/
	sigaction( SIGILL, &saved_sigill, NULL );
	sigaction( SIGFPE, &saved_sigfpe, NULL );

#else
	/* We can't use POSIX signal handling to test the availability of
	 * SSE, so we disable it by default.
	 */
	_lv_cpu_caps.hasSSE=0;
#endif /* VISUAL_OS_LINUX */
#endif /* VISUAL_ARCH_X86 */
}
283 
284 
/* Checks whether the cpuid instruction is available by attempting to
 * toggle the ID flag (bit 21, 0x200000) in EFLAGS: on CPUs that support
 * cpuid the bit is writable, so the value read back differs from the
 * original.  Returns non-zero when cpuid is usable, 0 otherwise.
 * NOTE(review): the pushf/popl sequence is 32-bit-only asm; on non-x86
 * builds this simply reports 0.
 */
static int has_cpuid (void)
{
#ifdef VISUAL_ARCH_X86
	int a, c;

	__asm __volatile
		("pushf\n"
		 "popl %0\n"
		 "movl %0, %1\n"
		 "xorl $0x200000, %0\n"
		 "push %0\n"
		 "popf\n"
		 "pushf\n"
		 "popl %0\n"
		 : "=a" (a), "=c" (c)
		 :
		 : "cc");

	return a != c;
#else
	return 0;
#endif
}
308 
cpuid(unsigned int ax,unsigned int * p)309 static int cpuid (unsigned int ax, unsigned int *p)
310 {
311 #ifdef VISUAL_ARCH_X86
312 	uint32_t flags;
313 
314 	__asm __volatile
315 		("movl %%ebx, %%esi\n\t"
316 		 "cpuid\n\t"
317 		 "xchgl %%ebx, %%esi"
318 		 : "=a" (p[0]), "=S" (p[1]),
319 		 "=c" (p[2]), "=d" (p[3])
320 		 : "0" (ax));
321 
322 	return VISUAL_OK;
323 #else
324 	return VISUAL_ERROR_CPU_INVALID_CODE;
325 #endif
326 }
327 
328 /**
329  * @defgroup VisCPU VisCPU
330  * @{
331  */
332 
visual_cpu_initialize()333 void visual_cpu_initialize ()
334 {
335 	uint32_t cpu_flags;
336 	unsigned int regs[4];
337 	unsigned int regs2[4];
338 
339 	memset (&_lv_cpu_caps, 0, sizeof (VisCPU));
340 
341 	/* Check for arch type */
342 #if defined(VISUAL_ARCH_MIPS)
343 	_lv_cpu_caps.type = VISUAL_CPU_TYPE_MIPS;
344 #elif defined(VISUAL_ARCH_ALPHA)
345 	_lv_cpu_caps.type = VISUAL_CPU_TYPE_ALPHA;
346 #elif defined(VISUAL_ARCH_SPARC)
347 	_lv_cpu_caps.type = VISUAL_CPU_TYPE_SPARC;
348 #elif defined(VISUAL_ARCH_X86)
349 	_lv_cpu_caps.type = VISUAL_CPU_TYPE_X86;
350 #elif defined(VISUAL_ARCH_POWERPC)
351 	_lv_cpu_caps.type = VISUAL_CPU_TYPE_POWERPC;
352 #else
353 	_lv_cpu_caps.type = VISUAL_CPU_TYPE_OTHER;
354 #endif
355 
356 	/* Count the number of CPUs in system */
357 #if !defined(VISUAL_OS_WIN32) && !defined(VISUAL_OS_UNKNOWN)
358 	_lv_cpu_caps.nrcpu = sysconf (_SC_NPROCESSORS_ONLN);
359 	if (_lv_cpu_caps.nrcpu == -1)
360 		_lv_cpu_caps.nrcpu = 1;
361 #else
362 	_lv_cpu_caps.nrcpu = 1;
363 #endif
364 
365 #if defined(VISUAL_ARCH_X86)
366 	/* No cpuid, old 486 or lower */
367 	if (has_cpuid () == 0)
368 		return;
369 
370 	_lv_cpu_caps.cacheline = 32;
371 
372 	/* Get max cpuid level */
373 	cpuid (0x00000000, regs);
374 
375 	if (regs[0] >= 0x00000001) {
376 		unsigned int cacheline;
377 
378 		cpuid (0x00000001, regs2);
379 
380 		_lv_cpu_caps.x86cpuType = (regs2[0] >> 8) & 0xf;
381 		if (_lv_cpu_caps.x86cpuType == 0xf)
382 		    _lv_cpu_caps.x86cpuType = 8 + ((regs2[0] >> 20) & 255); /* use extended family (P4, IA64) */
383 
384 		/* general feature flags */
385 		_lv_cpu_caps.hasTSC  = (regs2[3] & (1 << 8  )) >>  8; /* 0x0000010 */
386 		_lv_cpu_caps.hasMMX  = (regs2[3] & (1 << 23 )) >> 23; /* 0x0800000 */
387 		_lv_cpu_caps.hasSSE  = (regs2[3] & (1 << 25 )) >> 25; /* 0x2000000 */
388 		_lv_cpu_caps.hasSSE2 = (regs2[3] & (1 << 26 )) >> 26; /* 0x4000000 */
389 		_lv_cpu_caps.hasMMX2 = _lv_cpu_caps.hasSSE; /* SSE cpus supports mmxext too */
390 
391 		cacheline = ((regs2[1] >> 8) & 0xFF) * 8;
392 		if (cacheline > 0)
393 			_lv_cpu_caps.cacheline = cacheline;
394 	}
395 
396 	cpuid (0x80000000, regs);
397 
398 	if (regs[0] >= 0x80000001) {
399 
400 		cpuid (0x80000001, regs2);
401 
402 		_lv_cpu_caps.hasMMX  |= (regs2[3] & (1 << 23 )) >> 23; /* 0x0800000 */
403 		_lv_cpu_caps.hasMMX2 |= (regs2[3] & (1 << 22 )) >> 22; /* 0x400000 */
404 		_lv_cpu_caps.has3DNow    = (regs2[3] & (1 << 31 )) >> 31; /* 0x80000000 */
405 		_lv_cpu_caps.has3DNowExt = (regs2[3] & (1 << 30 )) >> 30;
406 	}
407 
408 	if (regs[0] >= 0x80000006) {
409 		cpuid (0x80000006, regs2);
410 		_lv_cpu_caps.cacheline = regs2[2] & 0xFF;
411 	}
412 
413 
414 #if defined(VISUAL_OS_LINUX) || defined(VISUAL_OS_FREEBSD) || defined(VISUAL_OS_NETBSD) || defined(VISUAL_OS_CYGWIN) || defined(VISUAL_OS_OPENBSD)
415 	if (_lv_cpu_caps.hasSSE)
416 		check_os_katmai_support ();
417 
418 	if (!_lv_cpu_caps.hasSSE)
419 		_lv_cpu_caps.hasSSE2 = 0;
420 #else
421 	_lv_cpu_caps.hasSSE=0;
422 	_lv_cpu_caps.hasSSE2 = 0;
423 #endif
424 #endif /* VISUAL_ARCH_X86 */
425 
426 #if defined(VISUAL_ARCH_POWERPC)
427 	check_os_altivec_support ();
428 #endif /* VISUAL_ARCH_POWERPC */
429 
430 	visual_log (VISUAL_LOG_DEBUG, "CPU: Number of CPUs: %d", _lv_cpu_caps.nrcpu);
431 	visual_log (VISUAL_LOG_DEBUG, "CPU: type %d", _lv_cpu_caps.type);
432 	visual_log (VISUAL_LOG_DEBUG, "CPU: X86 type %d", _lv_cpu_caps.x86cpuType);
433 	visual_log (VISUAL_LOG_DEBUG, "CPU: cacheline %d", _lv_cpu_caps.cacheline);
434 	visual_log (VISUAL_LOG_DEBUG, "CPU: TSC %d", _lv_cpu_caps.hasTSC);
435 	visual_log (VISUAL_LOG_DEBUG, "CPU: MMX %d", _lv_cpu_caps.hasMMX);
436 	visual_log (VISUAL_LOG_DEBUG, "CPU: MMX2 %d", _lv_cpu_caps.hasMMX2);
437 	visual_log (VISUAL_LOG_DEBUG, "CPU: SSE %d", _lv_cpu_caps.hasSSE);
438 	visual_log (VISUAL_LOG_DEBUG, "CPU: SSE2 %d", _lv_cpu_caps.hasSSE2);
439 	visual_log (VISUAL_LOG_DEBUG, "CPU: 3DNow %d", _lv_cpu_caps.has3DNow);
440 	visual_log (VISUAL_LOG_DEBUG, "CPU: 3DNowExt %d", _lv_cpu_caps.has3DNowExt);
441 	visual_log (VISUAL_LOG_DEBUG, "CPU: AltiVec %d", _lv_cpu_caps.hasAltiVec);
442 
443 	_lv_cpu_initialized = TRUE;
444 }
445 
visual_cpu_get_caps()446 VisCPU *visual_cpu_get_caps ()
447 {
448 	if (_lv_cpu_initialized == FALSE)
449 		return NULL;
450 
451 	return &_lv_cpu_caps;
452 }
453 
454 /**
455  * @}
456  */
457 
458