xref: /dragonfly/sys/cpu/x86_64/include/cpufunc.h (revision a563ca70)
1 /*-
2  * Copyright (c) 2003 Peter Wemm.
3  * Copyright (c) 1993 The Regents of the University of California.
4  * Copyright (c) 2008 The DragonFly Project.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
36  */
37 
38 /*
39  * Functions to provide access to special x86_64 instructions.
40  * This is included in sys/systm.h, and that file should be
41  * used in preference to this.
42  */
43 
44 #ifndef _CPU_CPUFUNC_H_
45 #define	_CPU_CPUFUNC_H_
46 
47 #include <sys/cdefs.h>
48 #include <machine/psl.h>
49 
50 struct thread;
51 struct region_descriptor;
52 
53 __BEGIN_DECLS
54 #define readb(va)	(*(volatile u_int8_t *) (va))
55 #define readw(va)	(*(volatile u_int16_t *) (va))
56 #define readl(va)	(*(volatile u_int32_t *) (va))
57 #define readq(va)	(*(volatile u_int64_t *) (va))
58 
59 #define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
60 #define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
61 #define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
62 #define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))
63 
64 #ifdef	__GNUC__
65 
66 #ifdef SMP
67 #include <machine/lock.h>		/* XXX */
68 #endif
69 
70 static __inline void
71 breakpoint(void)
72 {
73 	__asm __volatile("int $3");
74 }
75 
76 static __inline void
77 cpu_pause(void)
78 {
79 	__asm __volatile("pause");
80 }
81 
82 static __inline u_int
83 bsfl(u_int mask)
84 {
85 	u_int	result;
86 
87 	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
88 	return (result);
89 }
90 
91 static __inline u_long
92 bsfq(u_long mask)
93 {
94 	u_long	result;
95 
96 	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
97 	return (result);
98 }
99 
100 static __inline u_long
101 bsflong(u_long mask)
102 {
103 	u_long	result;
104 
105 	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
106 	return (result);
107 }
108 
109 static __inline u_int
110 bsrl(u_int mask)
111 {
112 	u_int	result;
113 
114 	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
115 	return (result);
116 }
117 
118 static __inline u_long
119 bsrq(u_long mask)
120 {
121 	u_long	result;
122 
123 	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
124 	return (result);
125 }
126 
127 static __inline void
128 do_cpuid(u_int ax, u_int *p)
129 {
130 	__asm __volatile("cpuid"
131 			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
132 			 :  "0" (ax));
133 }
134 
135 static __inline void
136 cpuid_count(u_int ax, u_int cx, u_int *p)
137 {
138 	__asm __volatile("cpuid"
139 			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
140 			 :  "0" (ax), "c" (cx));
141 }
142 
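/*
 * Usage sketch (illustrative only, not compiled): fetching the 12-byte
 * CPU vendor string with do_cpuid().  Leaf 0 returns the string in
 * %ebx/%edx/%ecx, which land in p[1]/p[3]/p[2] of the result array.
 * The helper name below is hypothetical.
 */
#if 0
static __inline void
example_cpu_vendor(u_int vendor[4])
{
	u_int regs[4];

	do_cpuid(0, regs);
	vendor[0] = regs[1];	/* %ebx: "Genu" / "Auth" */
	vendor[1] = regs[3];	/* %edx: "ineI" / "enti" */
	vendor[2] = regs[2];	/* %ecx: "ntel" / "cAMD" */
	vendor[3] = 0;		/* NUL-terminate when read as a string */
}
#endif
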
143 #ifndef _CPU_DISABLE_INTR_DEFINED
144 
145 static __inline void
146 cpu_disable_intr(void)
147 {
148 	__asm __volatile("cli" : : : "memory");
149 }
150 
151 #endif
152 
153 #ifndef _CPU_ENABLE_INTR_DEFINED
154 
155 static __inline void
156 cpu_enable_intr(void)
157 {
158 	__asm __volatile("sti");
159 }
160 
161 #endif
162 
163 /*
164  * CPU and compiler memory ordering fence.  mfence ensures strong read and
165  * write ordering.
166  *
167  * A serializing or fence instruction is required here.  A locked bus
168  * cycle on data for which we already own cache mastership is the most
169  * portable.
170  */
171 static __inline void
172 cpu_mfence(void)
173 {
174 #ifdef SMP
175 	__asm __volatile("mfence" : : : "memory");
176 #else
177 	__asm __volatile("" : : : "memory");
178 #endif
179 }
180 
181 /*
182  * cpu_lfence() ensures strong read ordering for reads issued prior
183  * to the instruction versus reads issued afterwards.
184  *
185  * A serializing or fence instruction is required here.  A locked bus
186  * cycle on data for which we already own cache mastership is the most
187  * portable.
188  */
189 static __inline void
190 cpu_lfence(void)
191 {
192 #ifdef SMP
193 	__asm __volatile("lfence" : : : "memory");
194 #else
195 	__asm __volatile("" : : : "memory");
196 #endif
197 }
198 
199 /*
200  * cpu_sfence() ensures strong write ordering for writes issued prior
201  * to the instruction versus writes issued afterwards.  Writes are
202  * ordered on Intel CPUs so we do not actually have to do anything.
203  */
204 static __inline void
205 cpu_sfence(void)
206 {
207 	/*
208 	 * NOTE:
209 	 * Don't use 'sfence' here, as it will create a lot of
210 	 * unnecessary stalls.
211 	 */
212 	__asm __volatile("" : : : "memory");
213 }
214 
215 /*
216  * cpu_ccfence() prevents the compiler from reordering instructions, in
217  * particular stores, relative to the current CPU.  Use cpu_sfence() if
218  * you need to guarantee ordering by both the compiler and by the CPU.
219  *
220  * This also prevents the compiler from caching memory loads into local
221  * variables across the routine.
222  */
223 static __inline void
224 cpu_ccfence(void)
225 {
226 	__asm __volatile("" : : : "memory");
227 }
228 
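/*
 * Usage sketch (illustrative only, not compiled): the conventional
 * pairing of the fences above.  The producer publishes data, then a
 * flag; the consumer reads the flag, then the data.  cpu_sfence()
 * orders the two stores, cpu_lfence() orders the two loads.  The
 * variable and function names are hypothetical.
 */
#if 0
static volatile int example_data;
static volatile int example_ready;

static __inline void
example_produce(int v)
{
	example_data = v;
	cpu_sfence();		/* make the data visible before the flag */
	example_ready = 1;
}

static __inline int
example_consume(void)
{
	while (example_ready == 0)
		cpu_pause();
	cpu_lfence();		/* order the data load after the flag load */
	return (example_data);
}
#endif
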
229 #ifdef _KERNEL
230 
231 #define	HAVE_INLINE_FFS
232 
233 static __inline int
234 ffs(int mask)
235 {
236 #if 0
237 	/*
238 	 * Note that gcc-2's builtin ffs would be used if we didn't declare
239 	 * this inline or turn off the builtin.  The builtin is faster but
240 	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
241 	 * versions.
242 	 */
243 	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
244 #else
245 	/* Actually, the above is way out of date.  The builtins use cmov etc */
246 	return (__builtin_ffs(mask));
247 #endif
248 }
249 
250 #define	HAVE_INLINE_FFSL
251 
252 static __inline int
253 ffsl(long mask)
254 {
255 	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
256 }
257 
258 #define	HAVE_INLINE_FLS
259 
260 static __inline int
261 fls(int mask)
262 {
263 	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
264 }
265 
266 #define	HAVE_INLINE_FLSL
267 
268 static __inline int
269 flsl(long mask)
270 {
271 	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
272 }
273 
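/*
 * For reference: ffs()/ffsl() and fls()/flsl() use 1-based bit indices
 * and return 0 for a zero argument.  For example, ffs(0x18) == 4
 * (lowest set bit is bit 3) and fls(0x18) == 5 (highest set bit is
 * bit 4).
 */
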
274 #endif /* _KERNEL */
275 
276 static __inline void
277 halt(void)
278 {
279 	__asm __volatile("hlt");
280 }
281 
282 /*
283  * The following complications are to get around gcc not having a
284  * constraint letter for the range 0..255.  We still put "d" in the
285  * constraint because "i" isn't a valid constraint when the port
286  * isn't constant.  This only matters for -O0 because otherwise
287  * the non-working version gets optimized away.
288  *
289  * Use an expression-statement instead of a conditional expression
290  * because gcc-2.6.0 would promote the operands of the conditional
291  * and produce poor code for "if ((inb(var) & const1) == const2)".
292  *
293  * The unnecessary test `(port) < 0x10000' is to generate a warning if
294  * the `port' has type u_short or smaller.  Such types are pessimal.
295  * This actually only works for signed types.  The range check is
296  * careful to avoid generating warnings.
297  */
298 #define	inb(port) __extension__ ({					\
299 	u_char	_data;							\
300 	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
301 	    && (port) < 0x10000)					\
302 		_data = inbc(port);					\
303 	else								\
304 		_data = inbv(port);					\
305 	_data; })
306 
307 #define	outb(port, data) (						\
308 	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100		\
309 	&& (port) < 0x10000						\
310 	? outbc(port, data) : outbv(port, data))
311 
312 static __inline u_char
313 inbc(u_int port)
314 {
315 	u_char	data;
316 
317 	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
318 	return (data);
319 }
320 
321 static __inline void
322 outbc(u_int port, u_char data)
323 {
324 	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
325 }
326 
327 static __inline u_char
328 inbv(u_int port)
329 {
330 	u_char	data;
331 	/*
332 	 * We use %%dx and not %1 here because i/o is done at %dx and not at
333 	 * %edx, while gcc generates inferior code (movw instead of movl)
334 	 * if we tell it to load (u_short) port.
335 	 */
336 	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
337 	return (data);
338 }
339 
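/*
 * Usage sketch (illustrative only, not compiled): with a compile-time
 * constant port, inb() expands to inbc() and the port can be encoded
 * as an immediate; with a run-time port it falls back to inbv() and
 * %dx.  The port number, variable, and helper name below are only for
 * illustration.
 */
#if 0
static __inline u_char
example_read_ports(u_int variable_port)
{
	u_char a, b;

	a = inb(0x64);			/* constant port: inbc() path */
	b = inb(variable_port);		/* variable port: inbv() path */
	return (a ^ b);
}
#endif
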
340 static __inline u_int
341 inl(u_int port)
342 {
343 	u_int	data;
344 
345 	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
346 	return (data);
347 }
348 
349 static __inline void
350 insb(u_int port, void *addr, size_t cnt)
351 {
352 	__asm __volatile("cld; rep; insb"
353 			 : "+D" (addr), "+c" (cnt)
354 			 : "d" (port)
355 			 : "memory");
356 }
357 
358 static __inline void
359 insw(u_int port, void *addr, size_t cnt)
360 {
361 	__asm __volatile("cld; rep; insw"
362 			 : "+D" (addr), "+c" (cnt)
363 			 : "d" (port)
364 			 : "memory");
365 }
366 
367 static __inline void
368 insl(u_int port, void *addr, size_t cnt)
369 {
370 	__asm __volatile("cld; rep; insl"
371 			 : "+D" (addr), "+c" (cnt)
372 			 : "d" (port)
373 			 : "memory");
374 }
375 
376 static __inline void
377 invd(void)
378 {
379 	__asm __volatile("invd");
380 }
381 
382 #if defined(_KERNEL)
383 
384 /*
385  * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
386  * will cause the invl*() functions to be equivalent to the cpu_invl*()
387  * functions.
388  */
389 #ifdef SMP
390 void smp_invltlb(void);
391 void smp_invltlb_intr(void);
392 #else
393 #define smp_invltlb()
394 #endif
395 
396 #ifndef _CPU_INVLPG_DEFINED
397 
398 /*
399  * Invalidate a particular VA on this CPU only
400  */
401 static __inline void
402 cpu_invlpg(void *addr)
403 {
404 	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
405 }
406 
407 #endif
408 
409 static __inline void
410 cpu_nop(void)
411 {
412 	__asm __volatile("rep; nop");
413 }
414 
415 #endif	/* _KERNEL */
416 
417 static __inline u_short
418 inw(u_int port)
419 {
420 	u_short	data;
421 
422 	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
423 	return (data);
424 }
425 
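/*
 * Atomically read a word and replace it with zero.  xchgl with a
 * memory operand is implicitly locked, so no explicit lock prefix is
 * needed; the caller gets the previous contents of *addr.
 */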
426 static __inline u_int
427 loadandclear(volatile u_int *addr)
428 {
429 	u_int   result;
430 
431 	__asm __volatile("xorl %0,%0; xchgl %1,%0"
432 			: "=&r" (result) : "m" (*addr));
433 	return (result);
434 }
435 
436 static __inline void
437 outbv(u_int port, u_char data)
438 {
439 	u_char	al;
440 	/*
441 	 * Use an unnecessary assignment to help gcc's register allocator.
442  * This makes a large difference for gcc-1.40 and a tiny difference
443 	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
444 	 * best results.  gcc-2.6.0 can't handle this.
445 	 */
446 	al = data;
447 	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
448 }
449 
450 static __inline void
451 outl(u_int port, u_int data)
452 {
453 	/*
454 	 * outl() and outw() aren't used much so we haven't looked at
455 	 * possible micro-optimizations such as the unnecessary
456 	 * assignment for them.
457 	 */
458 	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
459 }
460 
461 static __inline void
462 outsb(u_int port, const void *addr, size_t cnt)
463 {
464 	__asm __volatile("cld; rep; outsb"
465 			 : "+S" (addr), "+c" (cnt)
466 			 : "d" (port));
467 }
468 
469 static __inline void
470 outsw(u_int port, const void *addr, size_t cnt)
471 {
472 	__asm __volatile("cld; rep; outsw"
473 			 : "+S" (addr), "+c" (cnt)
474 			 : "d" (port));
475 }
476 
477 static __inline void
478 outsl(u_int port, const void *addr, size_t cnt)
479 {
480 	__asm __volatile("cld; rep; outsl"
481 			 : "+S" (addr), "+c" (cnt)
482 			 : "d" (port));
483 }
484 
485 static __inline void
486 outw(u_int port, u_short data)
487 {
488 	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
489 }
490 
491 static __inline void
492 ia32_pause(void)
493 {
494 	__asm __volatile("pause");
495 }
496 
497 static __inline u_long
498 read_rflags(void)
499 {
500 	u_long	rf;
501 
502 	__asm __volatile("pushfq; popq %0" : "=r" (rf));
503 	return (rf);
504 }
505 
506 static __inline u_int64_t
507 rdmsr(u_int msr)
508 {
509 	u_int32_t low, high;
510 
511 	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
512 	return (low | ((u_int64_t)high << 32));
513 }
514 
515 static __inline u_int64_t
516 rdpmc(u_int pmc)
517 {
518 	u_int32_t low, high;
519 
520 	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
521 	return (low | ((u_int64_t)high << 32));
522 }
523 
524 #define _RDTSC_SUPPORTED_
525 
526 static __inline u_int64_t
527 rdtsc(void)
528 {
529 	u_int32_t low, high;
530 
531 	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
532 	return (low | ((u_int64_t)high << 32));
533 }
534 
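/*
 * Usage sketch (illustrative only, not compiled): measuring a rough
 * interval in TSC ticks.  rdtsc is not a serializing instruction, so
 * precise measurements usually bracket it with a fence or cpuid; this
 * sketch omits that.  The helper name is hypothetical.
 */
#if 0
static __inline u_int64_t
example_tsc_interval(void (*fn)(void))
{
	u_int64_t t0, t1;

	t0 = rdtsc();
	fn();			/* work being timed */
	t1 = rdtsc();
	return (t1 - t0);
}
#endif
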
535 static __inline void
536 wbinvd(void)
537 {
538 	__asm __volatile("wbinvd");
539 }
540 
541 static __inline void
542 write_rflags(u_long rf)
543 {
544 	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
545 }
546 
547 static __inline void
548 wrmsr(u_int msr, u_int64_t newval)
549 {
550 	u_int32_t low, high;
551 
552 	low = newval;
553 	high = newval >> 32;
554 	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
555 }
556 
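/*
 * Usage sketch (illustrative only, not compiled): rdmsr()/wrmsr()
 * transfer the full 64-bit MSR value, so changing a single bit is a
 * read-modify-write.  The MSR number and bit are hypothetical.
 */
#if 0
static __inline void
example_msr_set_bit(u_int msr, int bit)
{
	u_int64_t val;

	val = rdmsr(msr);
	val |= (u_int64_t)1 << bit;
	wrmsr(msr, val);
}
#endif
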
557 static __inline void
558 load_cr0(u_long data)
559 {
560 
561 	__asm __volatile("movq %0,%%cr0" : : "r" (data));
562 }
563 
564 static __inline u_long
565 rcr0(void)
566 {
567 	u_long	data;
568 
569 	__asm __volatile("movq %%cr0,%0" : "=r" (data));
570 	return (data);
571 }
572 
573 static __inline u_long
574 rcr2(void)
575 {
576 	u_long	data;
577 
578 	__asm __volatile("movq %%cr2,%0" : "=r" (data));
579 	return (data);
580 }
581 
582 static __inline void
583 load_cr3(u_long data)
584 {
585 
586 	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
587 }
588 
589 static __inline u_long
590 rcr3(void)
591 {
592 	u_long	data;
593 
594 	__asm __volatile("movq %%cr3,%0" : "=r" (data));
595 	return (data);
596 }
597 
598 static __inline void
599 load_cr4(u_long data)
600 {
601 	__asm __volatile("movq %0,%%cr4" : : "r" (data));
602 }
603 
604 static __inline u_long
605 rcr4(void)
606 {
607 	u_long	data;
608 
609 	__asm __volatile("movq %%cr4,%0" : "=r" (data));
610 	return (data);
611 }
612 
613 #ifndef _CPU_INVLTLB_DEFINED
614 
615 /*
616  * Invalidate the TLB on this CPU only
617  */
618 static __inline void
619 cpu_invltlb(void)
620 {
621 	load_cr3(rcr3());
622 #if defined(SWTCH_OPTIM_STATS)
623 	++tlb_flush_count;
624 #endif
625 }
626 
627 #endif
628 
629 /*
630  * TLB flush for an individual page (even if it has PG_G).
631  * Only works on 486+ CPUs (i386 does not have PG_G).
632  */
633 static __inline void
634 invlpg(u_long addr)
635 {
636 
637 	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
638 }
639 
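/*
 * Usage sketch (illustrative only, not compiled): after changing a
 * single page table entry, invlpg() flushes just that translation,
 * even if the page was mapped with PG_G; reloading %cr3 via
 * cpu_invltlb() flushes everything except global pages.  The pte
 * pointer and helper name below are hypothetical.
 */
#if 0
static __inline void
example_unmap_page(volatile u_long *pte, u_long va)
{
	*pte = 0;		/* clear the hypothetical PTE */
	invlpg(va);		/* flush only this VA on this CPU */
}
#endif
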
640 static __inline u_short
641 rfs(void)
642 {
643 	u_short sel;
644 	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
645 	return (sel);
646 }
647 
648 static __inline u_short
649 rgs(void)
650 {
651 	u_short sel;
652 	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
653 	return (sel);
654 }
655 
656 static __inline void
657 load_ds(u_short sel)
658 {
659 	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
660 }
661 
662 static __inline void
663 load_es(u_short sel)
664 {
665 	__asm __volatile("movw %0,%%es" : : "rm" (sel));
666 }
667 
668 #ifdef _KERNEL
669 /* This is defined in <machine/specialreg.h> but is too painful to get to */
670 #ifndef	MSR_FSBASE
671 #define	MSR_FSBASE	0xc0000100
672 #endif
673 static __inline void
674 load_fs(u_short sel)
675 {
676 	/* Preserve the fsbase value across the selector load */
677 	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
678             : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
679 }
680 
681 #ifndef	MSR_GSBASE
682 #define	MSR_GSBASE	0xc0000101
683 #endif
684 static __inline void
685 load_gs(u_short sel)
686 {
687 	/*
688 	 * Preserve the gsbase value across the selector load.
689 	 * Note that we have to disable interrupts because the gsbase
690 	 * being trashed happens to be the kernel gsbase at the time.
691 	 */
692 	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
693             : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
694 }
695 #else
696 /* Usable by userland */
697 static __inline void
698 load_fs(u_short sel)
699 {
700 	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
701 }
702 
703 static __inline void
704 load_gs(u_short sel)
705 {
706 	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
707 }
708 #endif
709 
710 /* void lidt(struct region_descriptor *addr); */
711 static __inline void
712 lidt(struct region_descriptor *addr)
713 {
714 	__asm __volatile("lidt (%0)" : : "r" (addr));
715 }
716 
717 /* void lldt(u_short sel); */
718 static __inline void
719 lldt(u_short sel)
720 {
721 	__asm __volatile("lldt %0" : : "r" (sel));
722 }
723 
724 /* void ltr(u_short sel); */
725 static __inline void
726 ltr(u_short sel)
727 {
728 	__asm __volatile("ltr %0" : : "r" (sel));
729 }
730 
731 static __inline u_int64_t
732 rdr0(void)
733 {
734 	u_int64_t data;
735 	__asm __volatile("movq %%dr0,%0" : "=r" (data));
736 	return (data);
737 }
738 
739 static __inline void
740 load_dr0(u_int64_t dr0)
741 {
742 	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
743 }
744 
745 static __inline u_int64_t
746 rdr1(void)
747 {
748 	u_int64_t data;
749 	__asm __volatile("movq %%dr1,%0" : "=r" (data));
750 	return (data);
751 }
752 
753 static __inline void
754 load_dr1(u_int64_t dr1)
755 {
756 	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
757 }
758 
759 static __inline u_int64_t
760 rdr2(void)
761 {
762 	u_int64_t data;
763 	__asm __volatile("movq %%dr2,%0" : "=r" (data));
764 	return (data);
765 }
766 
767 static __inline void
768 load_dr2(u_int64_t dr2)
769 {
770 	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
771 }
772 
773 static __inline u_int64_t
774 rdr3(void)
775 {
776 	u_int64_t data;
777 	__asm __volatile("movq %%dr3,%0" : "=r" (data));
778 	return (data);
779 }
780 
781 static __inline void
782 load_dr3(u_int64_t dr3)
783 {
784 	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
785 }
786 
787 static __inline u_int64_t
788 rdr4(void)
789 {
790 	u_int64_t data;
791 	__asm __volatile("movq %%dr4,%0" : "=r" (data));
792 	return (data);
793 }
794 
795 static __inline void
796 load_dr4(u_int64_t dr4)
797 {
798 	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
799 }
800 
801 static __inline u_int64_t
802 rdr5(void)
803 {
804 	u_int64_t data;
805 	__asm __volatile("movq %%dr5,%0" : "=r" (data));
806 	return (data);
807 }
808 
809 static __inline void
810 load_dr5(u_int64_t dr5)
811 {
812 	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
813 }
814 
815 static __inline u_int64_t
816 rdr6(void)
817 {
818 	u_int64_t data;
819 	__asm __volatile("movq %%dr6,%0" : "=r" (data));
820 	return (data);
821 }
822 
823 static __inline void
824 load_dr6(u_int64_t dr6)
825 {
826 	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
827 }
828 
829 static __inline u_int64_t
830 rdr7(void)
831 {
832 	u_int64_t data;
833 	__asm __volatile("movq %%dr7,%0" : "=r" (data));
834 	return (data);
835 }
836 
837 static __inline void
838 load_dr7(u_int64_t dr7)
839 {
840 	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
841 }
842 
843 static __inline register_t
844 intr_disable(void)
845 {
846 	register_t rflags;
847 
848 	rflags = read_rflags();
849 	cpu_disable_intr();
850 	return (rflags);
851 }
852 
853 static __inline void
854 intr_restore(register_t rflags)
855 {
856 	write_rflags(rflags);
857 }
858 
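/*
 * Usage sketch (illustrative only, not compiled): intr_disable()
 * returns the previous rflags, so a critical section restores exactly
 * the interrupt state the caller started with, even when nested.  The
 * body of the critical section is hypothetical.
 */
#if 0
static __inline void
example_critical_section(volatile u_long *counter)
{
	register_t rflags;

	rflags = intr_disable();
	(*counter)++;		/* work that must not be interrupted */
	intr_restore(rflags);
}
#endif
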
859 #else /* !__GNUC__ */
860 
861 void	breakpoint(void);
862 void	cpu_pause(void);
863 u_int	bsfl(u_int mask);
864 u_int	bsrl(u_int mask);
865 void	cpu_disable_intr(void);
866 void	cpu_enable_intr(void);
867 void	cpu_invlpg(void *addr);
868 void	cpu_invlpg_range(u_long start, u_long end);
869 void	do_cpuid(u_int ax, u_int *p);
870 void	halt(void);
871 u_char	inb(u_int port);
872 u_int	inl(u_int port);
873 void	insb(u_int port, void *addr, size_t cnt);
874 void	insl(u_int port, void *addr, size_t cnt);
875 void	insw(u_int port, void *addr, size_t cnt);
876 void	invd(void);
877 void	invlpg(u_long addr);
878 void	invlpg_range(u_long start, u_long end);
879 void	cpu_invltlb(void);
880 u_short	inw(u_int port);
881 void	load_cr0(u_long cr0);
882 void	load_cr3(u_long cr3);
883 void	load_cr4(u_long cr4);
884 void	load_fs(u_short sel);
885 void	load_gs(u_short sel);
886 struct region_descriptor;
887 void	lidt(struct region_descriptor *addr);
888 void	lldt(u_short sel);
889 void	ltr(u_short sel);
890 void	outb(u_int port, u_char data);
891 void	outl(u_int port, u_int data);
892 void	outsb(u_int port, const void *addr, size_t cnt);
893 void	outsl(u_int port, const void *addr, size_t cnt);
894 void	outsw(u_int port, const void *addr, size_t cnt);
895 void	outw(u_int port, u_short data);
896 void	ia32_pause(void);
897 u_long	rcr0(void);
898 u_long	rcr2(void);
899 u_long	rcr3(void);
900 u_long	rcr4(void);
901 u_short	rfs(void);
902 u_short	rgs(void);
903 u_int64_t rdmsr(u_int msr);
904 u_int64_t rdpmc(u_int pmc);
905 u_int64_t rdtsc(void);
906 u_long	read_rflags(void);
907 void	wbinvd(void);
908 void	write_rflags(u_long rf);
909 void	wrmsr(u_int msr, u_int64_t newval);
910 u_int64_t	rdr0(void);
911 void	load_dr0(u_int64_t dr0);
912 u_int64_t	rdr1(void);
913 void	load_dr1(u_int64_t dr1);
914 u_int64_t	rdr2(void);
915 void	load_dr2(u_int64_t dr2);
916 u_int64_t	rdr3(void);
917 void	load_dr3(u_int64_t dr3);
918 u_int64_t	rdr4(void);
919 void	load_dr4(u_int64_t dr4);
920 u_int64_t	rdr5(void);
921 void	load_dr5(u_int64_t dr5);
922 u_int64_t	rdr6(void);
923 void	load_dr6(u_int64_t dr6);
924 u_int64_t	rdr7(void);
925 void	load_dr7(u_int64_t dr7);
926 register_t	intr_disable(void);
927 void	intr_restore(register_t rf);
928 
929 #endif	/* __GNUC__ */
930 
931 int	rdmsr_safe(u_int msr, uint64_t *val);
932 void	reset_dbregs(void);
933 
934 __END_DECLS
935 
936 #endif /* !_CPU_CPUFUNC_H_ */
937