xref: /dragonfly/sys/cpu/x86_64/include/cpufunc.h (revision 10f4bf95)
/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special x86_64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <machine/psl.h>

struct thread;
struct region_descriptor;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))
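
/*
 * Illustrative sketch (not part of this header): the read*()/write*()
 * macros are typically applied to a kernel virtual address that maps a
 * device's registers.  The mapping and the 0x10 register offset below
 * are hypothetical.
 */
#if 0
static __inline u_int32_t
example_read_status(volatile void *mmio_base)
{
	/* read a 32-bit device register 0x10 bytes into the mapping */
	return (readl((volatile char *)mmio_base + 0x10));
}
#endif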

#ifdef	__GNUC__

#ifdef SMP
#include <machine/lock.h>		/* XXX */
#endif

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause");
}
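
/*
 * Illustrative sketch (not part of this header): cpu_pause() is meant
 * for the body of spin-wait loops.  The flag variable below is
 * hypothetical.
 */
#if 0
static __inline void
example_spin_until_set(volatile int *flag)
{
	while (*flag == 0)
		cpu_pause();	/* hint to the cpu that we are spinning */
}
#endif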

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}
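
/*
 * Illustrative sketch (not part of this header): do_cpuid() fills
 * p[0..3] with %eax, %ebx, %ecx and %edx.  Leaf 0 returns the maximum
 * basic leaf in %eax and the 12-byte vendor string in %ebx, %edx, %ecx.
 */
#if 0
static __inline void
example_cpu_vendor(u_int vendor[3])
{
	u_int regs[4];

	do_cpuid(0, regs);
	vendor[0] = regs[1];	/* %ebx */
	vendor[1] = regs[3];	/* %edx */
	vendor[2] = regs[2];	/* %ecx */
	/* the 12 bytes spell e.g. "GenuineIntel" */
}
#endif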

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
#ifdef SMP
	__asm __volatile("mfence" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
#ifdef SMP
	__asm __volatile("lfence" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
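
/*
 * Illustrative sketch (not part of this header): a typical
 * producer/consumer handoff using the fences above.  The shared
 * variables are hypothetical.
 */
#if 0
volatile int example_data;
volatile int example_ready;

static __inline void
example_producer(int v)
{
	example_data = v;
	cpu_sfence();		/* order the data write before the flag write */
	example_ready = 1;
}

static __inline int
example_consumer(void)
{
	while (example_ready == 0)
		cpu_pause();
	cpu_lfence();		/* order the flag read before the data read */
	return (example_data);
}
#endif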

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
	/* Actually, the above is way out of date.  The builtins use cmov etc */
	return (__builtin_ffs(mask));
#endif
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}
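
/*
 * Illustrative sketch (not part of this header): ffs()/fls() return a
 * 1-based bit index, or 0 when no bit is set.  For example,
 * ffs(0x18) == 4 and fls(0x18) == 5, so fls(x) is the number of bits
 * needed to represent a non-zero x.
 */
#if 0
static __inline int
example_log2_roundup(u_long x)
{
	/* smallest n such that (1UL << n) >= x, for x >= 1 */
	return (x <= 1 ? 0 : flsl(x - 1));
}
#endif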

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100		\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))
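
/*
 * Illustrative sketch (not part of this header): classic port I/O
 * usage, reading a CMOS/RTC register by writing the register index to
 * port 0x70 and reading the value from port 0x71.  With constant ports
 * like these the macros above select the inbc()/outbc() forms.
 */
#if 0
static __inline u_char
example_read_cmos(u_char reg)
{
	outb(0x70, reg);
	return (inb(0x71));
}
#endif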

static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
#ifdef SMP
void smp_invltlb(void);
void smp_invltlb_intr(void);
#else
#define smp_invltlb()
#endif

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int   result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			: "=&r" (result) : "m" (*addr));
	return (result);
}
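
/*
 * Illustrative sketch (not part of this header): loadandclear()
 * atomically exchanges 0 into *addr and returns the previous value,
 * which is handy for draining a word of pending-event bits.  The
 * variable below is hypothetical.
 */
#if 0
volatile u_int example_pending;

static __inline u_int
example_drain_pending(void)
{
	/* returns the bits that were set and leaves the word zeroed */
	return (loadandclear(&example_pending));
}
#endif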

static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((u_int64_t)high << 32));
}
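
/*
 * Illustrative sketch (not part of this header): rdtsc() returns the
 * 64-bit time-stamp counter.  Note that rdtsc is not a serializing
 * instruction, so very short measurements may need an explicit fence
 * or cpuid around them.
 */
#if 0
static __inline u_int64_t
example_cycles_elapsed(u_int64_t start)
{
	return (rdtsc() - start);
}
#endif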

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
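
/*
 * Illustrative sketch (not part of this header): reading and writing a
 * 64-bit MSR.  MSR_FSBASE (0xc0000100) is used as the example register;
 * it is also referenced by load_fs() further below.
 */
#if 0
static __inline u_int64_t
example_get_fsbase(void)
{
	return (rdmsr(0xc0000100));	/* MSR_FSBASE */
}

static __inline void
example_set_fsbase(u_int64_t base)
{
	wrmsr(0xc0000100, base);	/* MSR_FSBASE */
}
#endif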

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}
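
/*
 * Illustrative sketch (not part of this header): after changing a page
 * table entry, the stale translation for that virtual address must be
 * flushed on this cpu; cpu_invltlb() (reloading %cr3) is the heavier
 * hammer that drops all non-global entries.  The address below is
 * hypothetical.
 */
#if 0
static __inline void
example_after_pte_change(u_long va)
{
	invlpg(va);		/* flush just this page's TLB entry */
}
#endif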

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
            : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
            : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
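
/*
 * Illustrative sketch (not part of this header): the usual pattern for
 * a short section that must run with interrupts disabled on the local
 * cpu; the previous interrupt state is restored rather than
 * unconditionally re-enabled.
 */
#if 0
static __inline void
example_protected_section(void)
{
	register_t rflags;

	rflags = intr_disable();
	/* ... touch per-cpu state that an interrupt handler also uses ... */
	intr_restore(rflags);
}
#endif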

#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(u_long addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg(u_long addr);
void	invlpg_range(u_long start, u_long end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
struct region_descriptor;
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, void *addr, size_t cnt);
void	outsl(u_int port, void *addr, size_t cnt);
void	outsw(u_int port, void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_long	rcr0(void);
u_long	rcr2(void);
u_long	rcr3(void);
u_long	rcr4(void);
u_short	rfs(void);
u_short	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
u_int64_t rdtsc(void);
u_long	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_long rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t	rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t	rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t	rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t	rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t	rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t	rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t	rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t	rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t	intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

int	rdmsr_safe(u_int msr, uint64_t *val);
void	reset_dbregs(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */