/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special x86_64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this one.
 */

#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <sys/thread.h>
#include <machine/clock.h>
#include <machine/psl.h>
#include <machine/smp.h>

struct thread;
struct region_descriptor;
struct pmap;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))
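
/*
 * Usage sketch (illustrative only, not part of the original header):
 * read-modify-write of a hypothetical 32-bit memory-mapped device
 * register that is already mapped at kernel virtual address 'reg'.
 */
static __inline void
example_reg_set_bits(void *reg, u_int32_t bits)
{
	writel(reg, readl(reg) | bits);	/* volatile read, then volatile write */
}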

#ifdef	__GNUC__

#include <machine/lock.h>		/* XXX */

struct trapframe;

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause":::"memory");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsflong(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{
	__asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}
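
/*
 * Usage sketch (illustrative only; the functions below are hypothetical
 * examples): leaf 0 returns the highest supported standard leaf in EAX,
 * and leaf 7 sub-leaf 0 returns the structured extended feature flags
 * in EBX.
 */
static __inline u_int
example_cpuid_max_leaf(void)
{
	u_int regs[4];

	do_cpuid(0, regs);
	return (regs[0]);		/* EAX */
}

static __inline u_int
example_cpuid_leaf7_ebx(void)
{
	u_int regs[4];

	cpuid_count(7, 0, regs);	/* callers should verify max leaf >= 7 */
	return (regs[1]);		/* EBX */
}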

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
	__asm __volatile("mfence" : : : "memory");
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
	__asm __volatile("lfence" : : : "memory");
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
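
/*
 * Usage sketch (illustrative only; the functions below are hypothetical
 * examples, not part of this header): a simple single-producer /
 * single-consumer hand-off pairing cpu_sfence() on the publishing side
 * with cpu_lfence() on the consuming side.  cpu_mfence() would only be
 * needed if a store had to be ordered before a subsequent load.
 */
static __inline void
example_publish(volatile int *datap, volatile int *readyp, int value)
{
	*datap = value;
	cpu_sfence();		/* data store is visible before the flag */
	*readyp = 1;
}

static __inline int
example_consume(volatile int *datap, volatile int *readyp)
{
	while (*readyp == 0)
		cpu_pause();	/* spin politely until published */
	cpu_lfence();		/* flag read is ordered before the data read */
	return (*datap);
}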

/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case by case basis), just before they
 * return, to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well as
 * a 48-core system built around an Opteron 6168 (Id = 0x100f91  Stepping = 1).
 * The problem does not appear to occur w/Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the AMD cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
	__asm __volatile("nop" : : : "memory");
}
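
/*
 * Usage sketch (hypothetical): a deeply recursive routine suspected of
 * triggering the bug would insert the workaround immediately before
 * returning, e.g.:
 *
 *	#ifdef __AMDCPUBUG_DFLY01_AVAILABLE__
 *		cpu_amdcpubug_dfly01();
 *	#endif
 *		return (res);
 */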

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
	return (__builtin_ffs(mask));
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline int
flsll(long long mask)
{
	return (flsl((long)mask));
}
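
/*
 * Quick reference (values follow directly from the definitions above;
 * bit indices are 1-based and 0 means no bit was set):
 *	ffs(0) == 0,   ffs(0x01) == 1,   ffs(0x18) == 4
 *	fls(0) == 0,   fls(0x01) == 1,   fls(0x18) == 5
 *	flsl(1UL << 40) == 41
 */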

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100		\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))
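
/*
 * Usage sketch (illustrative only): with a compile-time constant port
 * the macros select the immediate-operand forms (inbc/outbc); with a
 * variable port they fall back to the %dx forms (inbv/outbv):
 *
 *	u_char v = inb(0x64);		// constant port -> inbc()
 *	outb(ioport + 1, v | 0x01);	// variable port -> outbv()
 *
 * 0x64 and 'ioport' above are hypothetical; substitute the device's
 * actual I/O addresses.
 */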

static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 *
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int   result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			: "=&r" (result) : "m" (*addr));
	return (result);
}
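
/*
 * Usage sketch (illustrative only; the function below is a hypothetical
 * example): atomically consume an accumulated word of pending-event
 * bits, leaving zero behind.  xchg with a memory operand carries an
 * implicit lock, so no explicit LOCK prefix is needed.
 */
static __inline u_int
example_take_pending(volatile u_int *pendingp)
{
	return (loadandclear(pendingp));
}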

static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline tsc_uclock_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((tsc_uclock_t)high << 32));
}

#ifdef _KERNEL
#include <machine/cputypes.h>
#include <machine/md_var.h>

static __inline tsc_uclock_t
rdtsc_ordered(void)
{
	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		cpu_lfence();
	else
		cpu_mfence();
	return rdtsc();
}
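
/*
 * Usage sketch (illustrative only; the function below is a hypothetical
 * example): cycle-count a short operation.  The ordered variant keeps
 * the timestamp reads from drifting across the measured code; a plain
 * rdtsc() may be reordered by the cpu.
 */
static __inline tsc_uclock_t
example_cycle_count(void (*fn)(void *), void *arg)
{
	tsc_uclock_t t0, t1;

	t0 = rdtsc_ordered();
	fn(arg);
	t1 = rdtsc_ordered();
	return (t1 - t0);
}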
#endif

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

#if defined(_KERNEL)
void cpu_wbinvd_on_all_cpus_callback(void *arg);

static __inline void
cpu_wbinvd_on_all_cpus(void)
{
	lwkt_cpusync_simple(smp_active_mask, cpu_wbinvd_on_all_cpus_callback, NULL);
}
#endif

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr"
	    :
	    : "a" (low), "d" (high), "c" (msr)
	    : "memory");
}

static __inline void
load_xcr(u_int xcr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;

	__asm __volatile("xsetbv"
	    :
	    : "a" (low), "d" (high), "c" (xcr)
	    : "memory");
}

static __inline uint64_t
rxcr(u_int xcr)
{
	uint32_t low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (xcr));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_cr0(u_long data)
{
	__asm __volatile("movq %0,%%cr0" : : "r" (data) : "memory");
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr2(u_long data)
{
	__asm __volatile("movq %0,%%cr2" : : "r" (data) : "memory");
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data) : "memory");
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

void smp_invltlb(void);
void smp_sniff(void);
void cpu_sniff(int);
void hard_sniff(struct trapframe *);

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
            : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
            : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0) : "memory");
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1) : "memory");
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2) : "memory");
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3) : "memory");
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4) : "memory");
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5) : "memory");
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6) : "memory");
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7) : "memory");
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
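
/*
 * Usage sketch (illustrative only; the function below is a hypothetical
 * example): run a short critical section with local interrupts masked,
 * restoring the caller's previous interrupt state instead of blindly
 * re-enabling interrupts.
 */
static __inline void
example_intr_critical(void (*fn)(void *), void *arg)
{
	register_t rflags;

	rflags = intr_disable();
	fn(arg);
	intr_restore(rflags);
}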

#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(u_long addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg_range(u_int start, u_int end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_int cr0);
void	load_cr2(u_int cr2);
void	load_cr3(u_int cr3);
void	load_cr4(u_int cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, void *addr, size_t cnt);
void	outsl(u_int port, void *addr, size_t cnt);
void	outsw(u_int port, void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_int	rcr0(void);
u_int	rcr2(void);
u_int	rcr3(void);
u_int	rcr4(void);
u_short	rfs(void);
u_short	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
tsc_uclock_t rdtsc(void);
u_int	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_int rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t	rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t	rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t	rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t	rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t	rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t	rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t	rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t	rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t	intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
void	reset_dbregs(void);
void	smap_open(void);
void	smap_close(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */