xref: /dragonfly/sys/cpu/x86_64/include/cpufunc.h (revision fb151170)
/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special x86_64 instructions.
 * This file is included by <sys/systm.h>, and that header should be
 * used in preference to this one.
 */

#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <machine/psl.h>

struct thread;
struct region_descriptor;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))

#ifdef	__GNUC__

#ifdef SMP
#include <machine/lock.h>		/* XXX */
#endif

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsflong(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}

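/*
 * Usage sketch (illustrative only, not part of the original header):
 * querying basic CPUID information.  The regs[] array receives
 * %eax, %ebx, %ecx and %edx in that order.
 *
 *	u_int regs[4];
 *
 *	do_cpuid(0, regs);		// leaf 0: max leaf + vendor string
 *	do_cpuid(1, regs);		// leaf 1: feature bits in regs[2]/regs[3]
 *	cpuid_count(4, 0, regs);	// leaf 4, subleaf 0: cache parameters
 */
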
#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
#ifdef SMP
	__asm __volatile("mfence" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
#ifdef SMP
	__asm __volatile("lfence" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}

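/*
 * Usage sketch (illustrative only, not part of the original header):
 * a hypothetical producer/consumer pair handing data off through a
 * flag might pair the fences as follows (shared_data and shared_ready
 * are hypothetical variables):
 *
 *	producer:
 *		shared_data = value;
 *		cpu_sfence();		// data store ordered before the flag
 *		shared_ready = 1;
 *
 *	consumer:
 *		while (shared_ready == 0)
 *			cpu_pause();
 *		cpu_lfence();		// flag load ordered before data loads
 *		value = shared_data;
 *
 * cpu_ccfence() alone is sufficient when only compiler reordering must
 * be prevented.
 */
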
229 /*
230  * This is a horrible, horrible hack that might have to be put at the
231  * end of certain procedures (on a case by case basis), just before it
232  * returns to avoid what we believe to be an unreported AMD cpu bug.
233  * Found to occur on both a Phenom II X4 820 (two of them), as well
234  * as a 48-core built around an Opteron 6168 (Id = 0x100f91  Stepping = 1).
235  * The problem does not appear to occur w/Intel cpus.
236  *
237  * The bug is likely related to either a write combining issue or the
238  * Return Address Stack (RAS) hardware cache.
239  *
240  * In particular, we had to do this for GCC's fill_sons_in_loop() routine
241  * which due to its deep recursion and stack flow appears to be able to
242  * tickle the amd cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
243  * end of the routine just before it returns works around the bug.
244  *
245  * The bug appears to be extremely sensitive to %rip and %rsp values, to
246  * the point where even just inserting an instruction in an unrelated
247  * procedure (shifting the entire code base being run) effects the outcome.
248  * DragonFly is probably able to more readily reproduce the bug due to
249  * the stackgap randomization code.  We would expect OpenBSD (where we got
250  * the stackgap randomization code from) to also be able to reproduce the
251  * issue.  To date we have only reproduced the issue in DragonFly.
252  */
253 #define __AMDCPUBUG_DFLY01_AVAILABLE__
254 
255 static __inline void
256 cpu_amdcpubug_dfly01(void)
257 {
258 	__asm __volatile("nop" : : : "memory");
259 }
260 
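/*
 * Usage sketch (illustrative only): an affected routine simply calls the
 * workaround as its last action before returning.  deep_recursive_func()
 * is a hypothetical name.
 *
 *	static void
 *	deep_recursive_func(struct node *n)
 *	{
 *		...
 *		cpu_amdcpubug_dfly01();	// work around the suspected AMD bug
 *	}
 */
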
#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
	/* Actually, the above is way out of date.  The builtins use cmov etc */
	return (__builtin_ffs(mask));
#endif
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#endif /* _KERNEL */

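/*
 * Semantics sketch (illustrative only): ffs()/ffsl() return the 1-based
 * index of the least significant set bit, fls()/flsl() the 1-based index
 * of the most significant set bit, and all four return 0 for a zero
 * argument, e.g.
 *
 *	ffs(0x08) == 4		fls(0x08) == 4
 *	ffs(0x05) == 1		fls(0x05) == 3
 *	ffs(0)    == 0		fls(0)    == 0
 */
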
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100		\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))

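/*
 * Usage sketch (illustrative only): reading the CMOS RTC seconds
 * register through the inb()/outb() macros.  With constant ports the
 * compact inbc()/outbc() forms below are selected at compile time.
 *
 *	u_char sec;
 *
 *	outb(0x70, 0x00);	// select CMOS register 0 (seconds)
 *	sec = inb(0x71);	// read the value back
 */
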
static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
#ifdef SMP
void smp_invltlb(void);
void smp_invltlb_intr(void);
#else
#define smp_invltlb()
#endif

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int   result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			: "=&r" (result) : "m" (*addr));
	return (result);
}

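/*
 * Usage sketch (illustrative only): loadandclear() atomically fetches a
 * word and leaves zero behind (xchgl with a memory operand is implicitly
 * locked), which is handy for draining a word of pending-event bits.
 * 'pending' is a hypothetical variable.
 *
 *	u_int events;
 *
 *	events = loadandclear(&pending);
 *	while (events) {
 *		u_int bit = bsfl(events);
 *		events &= ~(1 << bit);
 *		... handle event 'bit' ...
 *	}
 */
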
static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((u_int64_t)high << 32));
}

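/*
 * Usage sketch (illustrative only): bracketing a code section with
 * rdtsc() to get a rough cycle count.  rdtsc is not a serializing
 * instruction, so extra serialization is needed when precise boundaries
 * matter, and the count is only meaningful on the same cpu.
 *
 *	u_int64_t t0, t1;
 *
 *	t0 = rdtsc();
 *	... code being measured ...
 *	t1 = rdtsc();
 *	// t1 - t0 is the approximate elapsed TSC cycle count
 */
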
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

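/*
 * Usage sketch (illustrative only): a read-modify-write of an MSR.
 * MSR_EXAMPLE and the bit being set are hypothetical; real MSR numbers
 * live in <machine/specialreg.h>.
 *
 *	u_int64_t val;
 *
 *	val = rdmsr(MSR_EXAMPLE);
 *	val |= 0x1;			// set some feature bit
 *	wrmsr(MSR_EXAMPLE, val);
 */
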
static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
            : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
            : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}

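/*
 * Usage sketch (illustrative only): the usual save/disable/restore
 * pattern around a short critical section.
 *
 *	register_t rflags;
 *
 *	rflags = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(rflags);
 *
 * intr_restore() rewrites the whole rflags register, so interrupts are
 * re-enabled only if they were enabled when intr_disable() was called.
 */
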
#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(u_long addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg(u_int addr);
void	invlpg_range(u_int start, u_int end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_int cr0);
void	load_cr3(u_int cr3);
void	load_cr4(u_int cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
struct region_descriptor;
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, void *addr, size_t cnt);
void	outsl(u_int port, void *addr, size_t cnt);
void	outsw(u_int port, void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_int	rcr0(void);
u_int	rcr2(void);
u_int	rcr3(void);
u_int	rcr4(void);
u_short	rfs(void);
u_short	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
u_int64_t rdtsc(void);
u_int	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_int rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t	rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t	rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t	rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t	rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t	rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t	rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t	rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t	rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t	intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

int	rdmsr_safe(u_int msr, uint64_t *val);
void	reset_dbregs(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */