/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special x86_64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <sys/thread.h>
#include <machine/clock.h>
#include <machine/psl.h>
#include <machine/smp.h>

struct thread;
struct region_descriptor;
struct pmap;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))

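/*
 * Illustrative sketch (hypothetical helper): read-modify-write a 32 bit
 * memory-mapped device register through the volatile access macros
 * above.  The register address and bit mask are assumptions supplied
 * by the caller.
 */
static __inline void
writel_setbits_example(void *va, u_int32_t bits)
{
	writel(va, readl(va) | bits);	/* volatile load, OR, volatile store */
}
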
#ifdef	__GNUC__

#include <machine/lock.h>		/* XXX */

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause":::"memory");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsflong(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{
	__asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
	__asm __volatile("mfence" : : : "memory");
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
	__asm __volatile("lfence" : : : "memory");
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
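
/*
 * Illustrative sketch (hypothetical helpers): publish a datum for another
 * cpu and consume it, showing where the fences above would typically sit.
 * cpu_sfence() orders the datum ahead of the flag on the producer side,
 * cpu_lfence() orders the flag read ahead of the datum read on the
 * consumer side, and cpu_ccfence()/cpu_pause() keep the wait loop from
 * caching the flag or hogging the pipeline.
 */
static __inline void
cpu_publish_example(u_int *datap, u_int *flagp, u_int value)
{
	*datap = value;
	cpu_sfence();		/* datum must be visible before the flag */
	*flagp = 1;
}

static __inline u_int
cpu_consume_example(u_int *datap, u_int *flagp)
{
	while (*flagp == 0) {
		cpu_ccfence();	/* force *flagp to be re-read each pass */
		cpu_pause();	/* spin-loop hint for the cpu */
	}
	cpu_lfence();		/* read the flag before reading the datum */
	return (*datap);
}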

/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case by case basis), just before they
 * return, to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well
 * as a 48-core system built around an Opteron 6168 (Id = 0x100f91  Stepping = 1).
 * The problem does not appear to occur w/Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the amd cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
	__asm __volatile("nop" : : : "memory");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
	/* Actually, the above is way out of date.  The builtins use cmov etc */
	return (__builtin_ffs(mask));
#endif
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline int
flsll(long long mask)
{
	return (flsl((long)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))

static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}
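
/*
 * Illustrative sketch (hypothetical helper): poll an i/o-port status
 * register until a caller-specified ready bit shows up, pausing between
 * reads.  The port number and bit mask are assumptions supplied by the
 * caller; this is not tied to any particular device.
 */
static __inline u_char
inb_poll_example(u_int port, u_char ready_bit)
{
	u_char	status;

	do {
		cpu_pause();			/* be polite while spinning */
		status = inb(port);		/* non-constant port: uses inbv() */
	} while ((status & ready_bit) == 0);
	return (status);
}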

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 *
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int   result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			: "=&r" (result) : "m" (*addr));
	return (result);
}

static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline tsc_uclock_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((tsc_uclock_t)high << 32));
}

#ifdef _KERNEL
#include <machine/cputypes.h>
#include <machine/md_var.h>

static __inline tsc_uclock_t
rdtsc_ordered(void)
{
	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		cpu_lfence();
	else
		cpu_mfence();
	return rdtsc();
}
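
/*
 * Illustrative sketch (hypothetical helper): return the number of TSC
 * ticks elapsed since 'start'.  rdtsc_ordered() keeps the sample from
 * being speculated ahead of the work being timed.
 */
static __inline tsc_uclock_t
tsc_elapsed_example(tsc_uclock_t start)
{
	return (rdtsc_ordered() - start);
}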
#endif

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

#if defined(_KERNEL)
void cpu_wbinvd_on_all_cpus_callback(void *arg);

static __inline void
cpu_wbinvd_on_all_cpus(void)
{
	lwkt_cpusync_simple(smp_active_mask, cpu_wbinvd_on_all_cpus_callback, NULL);
}
#endif

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
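
/*
 * Illustrative sketch (hypothetical helper): set bits in an MSR with a
 * read-modify-write of rdmsr()/wrmsr().  The MSR number and bit mask
 * are assumptions supplied by the caller.
 */
static __inline void
wrmsr_setbits_example(u_int msr, u_int64_t bits)
{
	wrmsr(msr, rdmsr(msr) | bits);
}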

static __inline void
xsetbv(u_int ecx, u_int eax, u_int edx)
{
	__asm __volatile(".byte 0x0f,0x01,0xd1"
	    :
	    : "a" (eax), "c" (ecx), "d" (edx));
}

static __inline void
load_cr0(u_long data)
{
	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

extern void smp_invltlb(void);
extern void smp_sniff(void);
extern void cpu_sniff(int dcpu);

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
            : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
            : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
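
/*
 * Illustrative sketch (hypothetical helper): run a short critical
 * section with local interrupts disabled, restoring the previous
 * interrupt state afterwards via the saved rflags value.
 */
static __inline void
intr_protected_increment_example(volatile u_int *counterp)
{
	register_t rflags;

	rflags = intr_disable();	/* save rflags, then cli */
	++*counterp;			/* work done with interrupts off */
	intr_restore(rflags);		/* put IF back the way it was */
}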

#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(u_long addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg_range(u_int start, u_int end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_int cr0);
void	load_cr3(u_int cr3);
void	load_cr4(u_int cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
struct region_descriptor;
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, void *addr, size_t cnt);
void	outsl(u_int port, void *addr, size_t cnt);
void	outsw(u_int port, void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_int	rcr0(void);
u_int	rcr2(void);
u_int	rcr3(void);
u_int	rcr4(void);
u_short	rfs(void);
u_short	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
tsc_uclock_t rdtsc(void);
u_int	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_int rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t	rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t	rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t	rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t	rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t	rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t	rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t	rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t	rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t	intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
void	reset_dbregs(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */