/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special x86_64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this one.
 */

#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <sys/thread.h>
#include <machine/psl.h>
#include <machine/smp.h>

struct thread;
struct region_descriptor;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))

#ifdef	__GNUC__

#include <machine/lock.h>		/* XXX */

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause":::"memory");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsflong(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{
	__asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}

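/*
 * Illustrative usage sketch for do_cpuid(); the 'example_*' name is
 * hypothetical and not part of this header's API, so it is kept under #if 0.
 */
#if 0
static __inline u_int
example_cpu_features(void)
{
	u_int regs[4];

	do_cpuid(1, regs);	/* regs[] = %eax, %ebx, %ecx, %edx of leaf 1 */
	return (regs[3]);	/* %edx feature flags */
}
#endif
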
#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
	__asm __volatile("mfence" : : : "memory");
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
	__asm __volatile("lfence" : : : "memory");
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus, so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}

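/*
 * Illustrative producer/consumer sketch showing how the fences above are
 * intended to pair up.  The 'example_*' names are hypothetical and not part
 * of this header's API, so the code is kept under #if 0.
 */
#if 0
static volatile int example_payload;
static volatile int example_ready;

static __inline void
example_publish(int v)
{
	example_payload = v;	/* write the payload */
	cpu_sfence();		/* order the payload write before the flag write */
	example_ready = 1;	/* publish */
}

static __inline int
example_consume(void)
{
	while (example_ready == 0)
		cpu_pause();	/* spin; volatile + cpu_pause() keep re-reading the flag */
	cpu_lfence();		/* order the flag read before the payload read */
	return (example_payload);
}
#endif
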
/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case-by-case basis), just before they
 * return, to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well
 * as a 48-core system built around an Opteron 6168 (Id = 0x100f91  Stepping = 1).
 * The problem does not appear to occur with Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the AMD cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
	__asm __volatile("nop" : : : "memory");
}

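/*
 * Illustrative placement sketch for cpu_amdcpubug_dfly01(); the deeply
 * recursive 'example_depth_sum' routine is hypothetical and the code is
 * kept under #if 0.
 */
#if 0
static int
example_depth_sum(int depth)
{
	int r;

	r = (depth > 0) ? example_depth_sum(depth - 1) + depth : 0;
	cpu_amdcpubug_dfly01();		/* workaround nop just before returning */
	return (r);
}
#endif
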
#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
	/* Actually, the above is way out of date.  The builtins use cmov etc */
	return (__builtin_ffs(mask));
#endif
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

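/*
 * Illustrative sketch of the common ffsl() idiom for walking the set bits
 * of a mask (e.g. ffsl(0x8) == 4, ffsl(0) == 0).  The 'example_*' name is
 * hypothetical and the code is kept under #if 0.
 */
#if 0
static __inline void
example_foreach_bit(long mask)
{
	int bit;

	while (mask != 0) {
		bit = ffsl(mask) - 1;	/* lowest set bit, 0-based */
		/* ... operate on 'bit' ... */
		mask &= mask - 1;	/* clear the lowest set bit */
	}
}
#endif
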
#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100		\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))

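/*
 * Illustrative sketch of how a constant port selects the inbc()/outbc()
 * variants while a variable port falls back to inbv()/outbv().  The CMOS
 * index/data ports 0x70/0x71 are used only as an example; the 'example_*'
 * name is hypothetical and the code is kept under #if 0.
 */
#if 0
static __inline u_char
example_cmos_read(u_char reg)
{
	outb(0x70, reg);	/* constant port -> outbc() */
	return (inb(0x71));	/* constant port -> inbc() */
}
#endif
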
static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
void smp_invltlb(void);
void smp_invltlb_intr(void);

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 *
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

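/*
 * Illustrative sketch: invalidate a small VA range page by page on the
 * local cpu only.  The 'example_*' name is hypothetical and the code is
 * kept under #if 0; PAGE_SIZE comes from <machine/param.h>.
 */
#if 0
static __inline void
example_invlpg_range(vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t va;

	for (va = sva; va < eva; va += PAGE_SIZE)
		cpu_invlpg((void *)va);
}
#endif
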
#if defined(_KERNEL)
struct smp_invlpg_range_cpusync_arg {
	vm_offset_t sva;
	vm_offset_t eva;
};

void
smp_invlpg_range_cpusync(void *arg);

static __inline void
smp_invlpg_range(cpumask_t mask, vm_offset_t sva, vm_offset_t eva)
{
	struct smp_invlpg_range_cpusync_arg arg;

	arg.sva = sva;
	arg.eva = eva;
	lwkt_cpusync_simple(mask, smp_invlpg_range_cpusync, &arg);
}
#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int   result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			: "=&r" (result) : "m" (*addr));
	return (result);
}

static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((u_int64_t)high << 32));
}

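/*
 * Illustrative sketch: measure elapsed TSC ticks around a code section
 * using rdtsc().  The 'example_*' name is hypothetical and the code is
 * kept under #if 0.
 */
#if 0
static __inline u_int64_t
example_tsc_delta(void)
{
	u_int64_t t0, t1;

	t0 = rdtsc();
	/* ... code under measurement ... */
	t1 = rdtsc();
	return (t1 - t0);
}
#endif
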
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

#if defined(_KERNEL)
void cpu_wbinvd_on_all_cpus_callback(void *arg);

static __inline void
cpu_wbinvd_on_all_cpus(void)
{
	lwkt_cpusync_simple(smp_active_mask, cpu_wbinvd_on_all_cpus_callback, NULL);
}
#endif

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

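/*
 * Illustrative sketch of a read-modify-write of an MSR built from
 * rdmsr()/wrmsr().  The 'example_*' name and the bit argument are
 * hypothetical; the code is kept under #if 0.
 */
#if 0
static __inline void
example_msr_set_bit(u_int msr, int bit)
{
	wrmsr(msr, rdmsr(msr) | ((u_int64_t)1 << bit));
}
#endif
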
static __inline void
xsetbv(u_int ecx, u_int eax, u_int edx)
{
	/*
	 * 0x0f,0x01,0xd1 is the 'xsetbv' instruction, hand-encoded for
	 * assemblers which do not know the mnemonic.
	 */
	__asm __volatile(".byte 0x0f,0x01,0xd1"
	    :
	    : "a" (eax), "c" (ecx), "d" (edx));
}

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
            : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
            : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}

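/*
 * Illustrative sketch of the save/disable/restore pattern built on
 * intr_disable()/intr_restore().  The 'example_*' name is hypothetical
 * and the code is kept under #if 0.
 */
#if 0
static __inline void
example_critical(void)
{
	register_t rf;

	rf = intr_disable();	/* save rflags, then cli */
	/* ... touch interrupt-sensitive state ... */
	intr_restore(rf);	/* restore the previous interrupt state */
}
#endif
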
#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(u_long addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg_range(u_int start, u_int end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_int cr0);
void	load_cr3(u_int cr3);
void	load_cr4(u_int cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
struct region_descriptor;
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, void *addr, size_t cnt);
void	outsl(u_int port, void *addr, size_t cnt);
void	outsw(u_int port, void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_int	rcr0(void);
u_int	rcr2(void);
u_int	rcr3(void);
u_int	rcr4(void);
u_short	rfs(void);
u_short	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
u_int64_t rdtsc(void);
u_int	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_int rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t	rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t	rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t	rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t	rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t	rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t	rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t	rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t	rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t	intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

int	rdmsr_safe(u_int msr, uint64_t *val);
void	reset_dbregs(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */