/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special x86_64 instructions.
 * This file is included by sys/systm.h, and that file should be
 * used in preference to this one.
 */

#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <machine/psl.h>

struct thread;
struct region_descriptor;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))

#ifdef	__GNUC__

#ifdef SMP
#include <machine/lock.h>		/* XXX */
#endif

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}
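
/*
 * Illustrative sketch, not part of the original header: testing a CPUID
 * feature bit with do_cpuid().  Leaf 1 EDX bit 26 is the architectural
 * SSE2 feature flag; the helper name is made up for the example.
 */
static __inline int
example_cpu_has_sse2(void)
{
	u_int regs[4];

	do_cpuid(1, regs);			/* leaf 1: feature information */
	return ((regs[3] & (1u << 26)) != 0);	/* regs[3] = %edx, bit 26 = SSE2 */
}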

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
#ifdef SMP
	__asm __volatile("mfence" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
#ifdef SMP
	__asm __volatile("lfence" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
#ifdef SMP
	__asm __volatile("sfence" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}
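
/*
 * Illustrative sketch, not part of the original header: the usual pairing
 * of cpu_sfence() on the producer side with cpu_lfence() on the consumer
 * side when publishing data through a flag.  The structure and function
 * names are made up for the example.
 */
struct example_mailbox {
	int	xm_data;
	int	xm_ready;
};

static __inline void
example_publish(struct example_mailbox *mb, int value)
{
	mb->xm_data = value;
	cpu_sfence();		/* make the data visible before the flag */
	mb->xm_ready = 1;
}

static __inline int
example_consume(struct example_mailbox *mb)
{
	while (mb->xm_ready == 0) {
		cpu_pause();
		cpu_lfence();	/* also a compiler barrier: re-read the flag */
	}
	cpu_lfence();		/* order the flag read before the data read */
	return (mb->xm_data);
}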

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
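
/*
 * Illustrative sketch, not part of the original header: a typical busy-wait
 * on a flag written by another cpu or by an interrupt handler.  The
 * cpu_ccfence() compiler barrier keeps the load from being hoisted out of
 * the loop.  The function name is made up for the example.
 */
static __inline void
example_wait_for_flag(int *flagp)
{
	while (*flagp == 0) {
		cpu_ccfence();	/* force *flagp to be re-read each iteration */
		cpu_pause();	/* 'pause' hint while spinning */
	}
}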

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
	/* Actually, the above is way out of date.  The builtins use cmov etc */
	return (__builtin_ffs(mask));
#endif
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))

static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}
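
/*
 * Illustrative sketch, not part of the original header: reading an i/o
 * port through the inb() macro.  A constant port such as 0x64 (the AT
 * keyboard controller status port, used here purely as an example) takes
 * the inbc() path; a port held in a variable takes the inbv() path.  The
 * function name is made up for the example.
 */
static __inline u_char
example_read_kbd_status(void)
{
	return (inb(0x64));	/* constant port: expands to inbc(0x64) */
}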

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
#ifdef SMP
void smp_invltlb(void);
void smp_invltlb_intr(void);
#else
#define smp_invltlb()
#endif

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int   result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			: "=&r" (result) : "m" (*addr));
	return (result);
}

static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((u_int64_t)high << 32));
}
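
/*
 * Illustrative sketch, not part of the original header: measuring the
 * cycle count of a code sequence with rdtsc().  rdtsc is not a
 * serializing instruction, so out-of-order execution can skew very short
 * measurements; the function names are made up for the example.
 */
static __inline u_int64_t
example_cycles_spent(void (*func)(void *), void *arg)
{
	u_int64_t start, end;

	start = rdtsc();
	func(arg);
	end = rdtsc();
	return (end - start);
}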

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
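
/*
 * Illustrative sketch, not part of the original header: a read-modify-write
 * of an MSR using rdmsr()/wrmsr().  The caller supplies the MSR number; the
 * helper name is made up for the example.
 */
static __inline void
example_msr_set_bits(u_int msr, u_int64_t bits)
{
	wrmsr(msr, rdmsr(msr) | bits);
}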

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

/*
 * TLB flush for an individual page (even if it has PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}
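
/*
 * Illustrative sketch, not part of the original header: invalidating a
 * range of mappings one page at a time with invlpg().  4096 is the x86_64
 * base page size; the function name is made up for the example.
 */
static __inline void
example_invlpg_range(u_long sva, u_long eva)
{
	u_long va;

	for (va = sva; va < eva; va += 4096)
		invlpg(va);
}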

static __inline u_int
rfs(void)
{
	u_int sel;
	__asm __volatile("movl %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_int
rgs(void)
{
	u_int sel;
	__asm __volatile("movl %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_int sel)
{
	__asm __volatile("movl %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_int sel)
{
	__asm __volatile("movl %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_int sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movl %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_int sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_int sel)
{
	__asm __volatile("movl %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_int sel)
{
	__asm __volatile("movl %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
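
/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * of saving rflags, disabling interrupts around a short critical section,
 * and restoring the previous interrupt state afterwards.  The function and
 * variable names are made up for the example.
 */
static __inline void
example_critical_update(u_long *counterp)
{
	register_t rflags;

	rflags = intr_disable();	/* save rflags and execute cli */
	(*counterp)++;			/* short critical section */
	intr_restore(rflags);		/* restore the saved interrupt state */
}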

#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(void *addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg(u_long addr);
void	invlpg_range(u_long start, u_long end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
struct region_descriptor;
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, const void *addr, size_t cnt);
void	outsl(u_int port, const void *addr, size_t cnt);
void	outsw(u_int port, const void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_long	rcr0(void);
u_long	rcr2(void);
u_long	rcr3(void);
u_long	rcr4(void);
u_int	rfs(void);
u_int	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
u_int64_t rdtsc(void);
u_long	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_long rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t	rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t	rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t	rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t	rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t	rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t	rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t	rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t	rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t	intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

void	reset_dbregs(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */