xref: /dragonfly/sys/cpu/x86_64/include/cpufunc.h (revision 28c26f7e)
1 /*-
2  * Copyright (c) 2003 Peter Wemm.
3  * Copyright (c) 1993 The Regents of the University of California.
4  * Copyright (c) 2008 The DragonFly Project.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
36  */
37 
38 /*
39  * Functions to provide access to special x86_64 instructions.
40  * This is included in sys/systm.h, and that file should be
41  * used in preference to this one.
42  */
43 
44 #ifndef _CPU_CPUFUNC_H_
45 #define	_CPU_CPUFUNC_H_
46 
47 #include <sys/cdefs.h>
48 #include <machine/psl.h>
49 
50 struct thread;
51 struct region_descriptor;
52 
53 __BEGIN_DECLS
54 #define readb(va)	(*(volatile u_int8_t *) (va))
55 #define readw(va)	(*(volatile u_int16_t *) (va))
56 #define readl(va)	(*(volatile u_int32_t *) (va))
57 #define readq(va)	(*(volatile u_int64_t *) (va))
58 
59 #define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
60 #define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
61 #define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
62 #define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))
63 
64 #ifdef	__GNUC__
65 
66 #ifdef SMP
67 #include <machine/lock.h>		/* XXX */
68 #endif
69 
70 static __inline void
71 breakpoint(void)
72 {
73 	__asm __volatile("int $3");
74 }
75 
76 static __inline void
77 cpu_pause(void)
78 {
79 	__asm __volatile("pause");
80 }
81 
82 static __inline u_int
83 bsfl(u_int mask)
84 {
85 	u_int	result;
86 
87 	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
88 	return (result);
89 }
90 
91 static __inline u_long
92 bsfq(u_long mask)
93 {
94 	u_long	result;
95 
96 	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
97 	return (result);
98 }
99 
100 static __inline u_int
101 bsrl(u_int mask)
102 {
103 	u_int	result;
104 
105 	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
106 	return (result);
107 }
108 
109 static __inline u_long
110 bsrq(u_long mask)
111 {
112 	u_long	result;
113 
114 	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
115 	return (result);
116 }
117 
118 static __inline void
119 cpu_disable_intr(void)
120 {
121 	__asm __volatile("cli" : : : "memory");
122 }
123 
124 static __inline void
125 do_cpuid(u_int ax, u_int *p)
126 {
127 	__asm __volatile("cpuid"
128 			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
129 			 :  "0" (ax));
130 }
131 
132 static __inline void
133 cpuid_count(u_int ax, u_int cx, u_int *p)
134 {
135 	__asm __volatile("cpuid"
136 			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
137 			 :  "0" (ax), "c" (cx));
138 }
139 
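/*
 * Example usage (illustrative sketch, not from the original source):
 * fetching the CPU vendor string with do_cpuid().  Leaf 0 returns the
 * maximum basic leaf in EAX and the 12-byte vendor string in EBX, EDX
 * and ECX, in that order.  The function name is hypothetical.
 */
static __inline void
example_cpu_vendor(char *buf)	/* buf must hold at least 13 bytes */
{
	u_int	regs[4];
	u_int	vendor[3];
	int	i;

	do_cpuid(0, regs);
	vendor[0] = regs[1];	/* EBX */
	vendor[1] = regs[3];	/* EDX */
	vendor[2] = regs[2];	/* ECX */
	for (i = 0; i < 12; ++i)
		buf[i] = ((char *)vendor)[i];
	buf[12] = '\0';
}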
140 static __inline void
141 cpu_enable_intr(void)
142 {
143 	__asm __volatile("sti");
144 }
145 
146 /*
147  * Cpu and compiler memory ordering fence.  mfence ensures strong read and
148  * write ordering.
149  *
150  * A serializing or fence instruction is required here.  A locked bus
151  * cycle on data for which we already own cache mastership is the most
152  * portable.
153  */
154 static __inline void
155 cpu_mfence(void)
156 {
157 #ifdef SMP
158 	__asm __volatile("mfence" : : : "memory");
159 #else
160 	__asm __volatile("" : : : "memory");
161 #endif
162 }
163 
164 /*
165  * cpu_lfence() ensures strong read ordering for reads issued prior
166  * to the instruction versus reads issued afterwards.
167  *
168  * A serializing or fence instruction is required here.  A locked bus
169  * cycle on data for which we already own cache mastership is the most
170  * portable.
171  */
172 static __inline void
173 cpu_lfence(void)
174 {
175 #ifdef SMP
176 	__asm __volatile("lfence" : : : "memory");
177 #else
178 	__asm __volatile("" : : : "memory");
179 #endif
180 }
181 
182 /*
183  * cpu_sfence() ensures strong write ordering for writes issued prior
184  * to the instruction versus writes issued afterwards.  Writes are
185  * ordered on Intel CPUs, so we do not actually have to do anything.
186  */
187 static __inline void
188 cpu_sfence(void)
189 {
190 #ifdef SMP
191 	__asm __volatile("sfence" : : : "memory");
192 #else
193 	__asm __volatile("" : : : "memory");
194 #endif
195 }
196 
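/*
 * Example usage (illustrative sketch, not from the original source):
 * a simple producer/consumer hand-off built on the fences above.  The
 * producer publishes the data before setting the ready flag; the
 * consumer checks the flag before reading the data.  The structure and
 * function names are hypothetical.
 */
struct example_mailbox {
	volatile u_int	mb_data;
	volatile u_int	mb_ready;
};

static __inline void
example_publish(struct example_mailbox *mb, u_int value)
{
	mb->mb_data = value;
	cpu_sfence();			/* data visible before the flag */
	mb->mb_ready = 1;
}

static __inline u_int
example_consume(struct example_mailbox *mb)
{
	while (mb->mb_ready == 0)
		cpu_pause();
	cpu_lfence();			/* flag read before the data read */
	return (mb->mb_data);
}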
197 /*
198  * cpu_ccfence() prevents the compiler from reordering instructions, in
199  * particular stores, relative to the current cpu.  Use cpu_sfence() if
200  * you need to guarantee ordering by both the compiler and the cpu.
201  *
202  * This also prevents the compiler from caching memory loads into local
203  * variables across the routine.
204  */
205 static __inline void
206 cpu_ccfence(void)
207 {
208 	__asm __volatile("" : : : "memory");
209 }
210 
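/*
 * Example usage (illustrative sketch, not from the original source):
 * without cpu_ccfence() the compiler may hoist the load of *ptr out of
 * the loop and spin on a stale register copy.  The function name is
 * hypothetical.
 */
static __inline void
example_spin_wait(int *ptr)
{
	while (*ptr == 0) {
		cpu_ccfence();		/* force *ptr to be re-read */
		cpu_pause();
	}
}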
211 #ifdef _KERNEL
212 
213 #define	HAVE_INLINE_FFS
214 
215 static __inline int
216 ffs(int mask)
217 {
218 #if 0
219 	/*
220 	 * Note that gcc-2's builtin ffs would be used if we didn't declare
221 	 * this inline or turn off the builtin.  The builtin is faster but
222 	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
223 	 * versions.
224 	 */
225 	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
226 #else
227 	/* Actually, the above is way out of date.  The builtins use cmov etc */
228 	return (__builtin_ffs(mask));
229 #endif
230 }
231 
232 #define	HAVE_INLINE_FFSL
233 
234 static __inline int
235 ffsl(long mask)
236 {
237 	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
238 }
239 
240 #define	HAVE_INLINE_FLS
241 
242 static __inline int
243 fls(int mask)
244 {
245 	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
246 }
247 
248 #define	HAVE_INLINE_FLSL
249 
250 static __inline int
251 flsl(long mask)
252 {
253 	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
254 }
255 
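/*
 * Example usage (illustrative sketch, not from the original source):
 * scanning an allocation bitmap with ffsl().  A set bit marks a free
 * slot; ffsl() returns the 1-based index of the lowest set bit, or 0 if
 * none is set.  The function name is hypothetical.
 */
static __inline int
example_alloc_slot(u_long *bitmap)
{
	int	bit;

	bit = ffsl((long)*bitmap);
	if (bit == 0)
		return (-1);			/* no free slot */
	*bitmap &= ~(1UL << (bit - 1));		/* mark slot allocated */
	return (bit - 1);
}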
256 #endif /* _KERNEL */
257 
258 static __inline void
259 halt(void)
260 {
261 	__asm __volatile("hlt");
262 }
263 
264 /*
265  * The following complications are to get around gcc not having a
266  * constraint letter for the range 0..255.  We still put "d" in the
267  * constraint because "i" isn't a valid constraint when the port
268  * isn't constant.  This only matters for -O0 because otherwise
269  * the non-working version gets optimized away.
270  *
271  * Use an expression-statement instead of a conditional expression
272  * because gcc-2.6.0 would promote the operands of the conditional
273  * and produce poor code for "if ((inb(var) & const1) == const2)".
274  *
275  * The unnecessary test `(port) < 0x10000' is to generate a warning if
276  * the `port' has type u_short or smaller.  Such types are pessimal.
277  * This actually only works for signed types.  The range check is
278  * careful to avoid generating warnings.
279  */
280 #define	inb(port) __extension__ ({					\
281 	u_char	_data;							\
282 	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
283 	    && (port) < 0x10000)					\
284 		_data = inbc(port);					\
285 	else								\
286 		_data = inbv(port);					\
287 	_data; })
288 
289 #define	outb(port, data) (						\
290 	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100		\
291 	&& (port) < 0x10000						\
292 	? outbc(port, data) : outbv(port, data))
293 
294 static __inline u_char
295 inbc(u_int port)
296 {
297 	u_char	data;
298 
299 	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
300 	return (data);
301 }
302 
303 static __inline void
304 outbc(u_int port, u_char data)
305 {
306 	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
307 }
308 
309 static __inline u_char
310 inbv(u_int port)
311 {
312 	u_char	data;
313 	/*
314 	 * We use %%dx and not %1 here because i/o is done at %dx and not at
315 	 * %edx, while gcc generates inferior code (movw instead of movl)
316 	 * if we tell it to load (u_short) port.
317 	 */
318 	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
319 	return (data);
320 }
321 
322 static __inline u_int
323 inl(u_int port)
324 {
325 	u_int	data;
326 
327 	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
328 	return (data);
329 }
330 
331 static __inline void
332 insb(u_int port, void *addr, size_t cnt)
333 {
334 	__asm __volatile("cld; rep; insb"
335 			 : "+D" (addr), "+c" (cnt)
336 			 : "d" (port)
337 			 : "memory");
338 }
339 
340 static __inline void
341 insw(u_int port, void *addr, size_t cnt)
342 {
343 	__asm __volatile("cld; rep; insw"
344 			 : "+D" (addr), "+c" (cnt)
345 			 : "d" (port)
346 			 : "memory");
347 }
348 
349 static __inline void
350 insl(u_int port, void *addr, size_t cnt)
351 {
352 	__asm __volatile("cld; rep; insl"
353 			 : "+D" (addr), "+c" (cnt)
354 			 : "d" (port)
355 			 : "memory");
356 }
357 
358 static __inline void
359 invd(void)
360 {
361 	__asm __volatile("invd");
362 }
363 
364 #if defined(_KERNEL)
365 
366 /*
367  * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
368  * will cause the invl*() functions to be equivalent to the cpu_invl*()
369  * functions.
370  */
371 #ifdef SMP
372 void smp_invltlb(void);
373 #else
374 #define smp_invltlb()
375 #endif
376 
377 #ifndef _CPU_INVLPG_DEFINED
378 
379 /*
380  * Invalidate a particular VA on this cpu only
381  */
382 static __inline void
383 cpu_invlpg(void *addr)
384 {
385 	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
386 }
387 
388 #endif
389 
390 static __inline void
391 cpu_nop(void)
392 {
393 	__asm __volatile("rep; nop");
394 }
395 
396 #endif	/* _KERNEL */
397 
398 static __inline u_short
399 inw(u_int port)
400 {
401 	u_short	data;
402 
403 	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
404 	return (data);
405 }
406 
407 static __inline u_int
408 loadandclear(volatile u_int *addr)
409 {
410 	u_int   result;
411 
412 	__asm __volatile("xorl %0,%0; xchgl %1,%0"
413 			: "=&r" (result) : "m" (*addr));
414 	return (result);
415 }
416 
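/*
 * Example usage (illustrative sketch, not from the original source):
 * loadandclear() atomically exchanges zero into *addr (xchg with a
 * memory operand is implicitly locked), so it can grab-and-reset a word
 * of pending event bits in one step.  The names are hypothetical.
 */
static __inline void
example_drain_events(volatile u_int *pending)
{
	u_int	events;
	u_int	bit;

	events = loadandclear(pending);
	while (events) {
		bit = bsfl(events);		/* lowest pending event */
		events &= ~(1U << bit);
		/* ... dispatch event 'bit' here ... */
	}
}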
417 static __inline void
418 outbv(u_int port, u_char data)
419 {
420 	u_char	al;
421 	/*
422 	 * Use an unnecessary assignment to help gcc's register allocator.
423 	 * This make a large difference for gcc-1.40 and a tiny difference
424 	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
425 	 * best results.  gcc-2.6.0 can't handle this.
426 	 */
427 	al = data;
428 	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
429 }
430 
431 static __inline void
432 outl(u_int port, u_int data)
433 {
434 	/*
435 	 * outl() and outw() aren't used much so we haven't looked at
436 	 * possible micro-optimizations such as the unnecessary
437 	 * assignment for them.
438 	 */
439 	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
440 }
441 
442 static __inline void
443 outsb(u_int port, const void *addr, size_t cnt)
444 {
445 	__asm __volatile("cld; rep; outsb"
446 			 : "+S" (addr), "+c" (cnt)
447 			 : "d" (port));
448 }
449 
450 static __inline void
451 outsw(u_int port, const void *addr, size_t cnt)
452 {
453 	__asm __volatile("cld; rep; outsw"
454 			 : "+S" (addr), "+c" (cnt)
455 			 : "d" (port));
456 }
457 
458 static __inline void
459 outsl(u_int port, const void *addr, size_t cnt)
460 {
461 	__asm __volatile("cld; rep; outsl"
462 			 : "+S" (addr), "+c" (cnt)
463 			 : "d" (port));
464 }
465 
466 static __inline void
467 outw(u_int port, u_short data)
468 {
469 	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
470 }
471 
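/*
 * Example usage (illustrative sketch, not from the original source):
 * a typical poll-then-write sequence built on the port i/o helpers
 * above.  The EXAMPLE_* port numbers and status bit are hypothetical;
 * real values come from the device's register layout.
 */
#define	EXAMPLE_STATUS_PORT	0x64	/* hypothetical status register */
#define	EXAMPLE_DATA_PORT	0x60	/* hypothetical data register */
#define	EXAMPLE_BUSY		0x02	/* hypothetical busy bit */

static __inline void
example_send_byte(u_char data)
{
	while (inb(EXAMPLE_STATUS_PORT) & EXAMPLE_BUSY)
		cpu_pause();		/* wait for the device to go idle */
	outb(EXAMPLE_DATA_PORT, data);
}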
472 static __inline void
473 ia32_pause(void)
474 {
475 	__asm __volatile("pause");
476 }
477 
478 static __inline u_long
479 read_rflags(void)
480 {
481 	u_long	rf;
482 
483 	__asm __volatile("pushfq; popq %0" : "=r" (rf));
484 	return (rf);
485 }
486 
487 static __inline u_int64_t
488 rdmsr(u_int msr)
489 {
490 	u_int32_t low, high;
491 
492 	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
493 	return (low | ((u_int64_t)high << 32));
494 }
495 
496 static __inline u_int64_t
497 rdpmc(u_int pmc)
498 {
499 	u_int32_t low, high;
500 
501 	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
502 	return (low | ((u_int64_t)high << 32));
503 }
504 
505 #define _RDTSC_SUPPORTED_
506 
507 static __inline u_int64_t
508 rdtsc(void)
509 {
510 	u_int32_t low, high;
511 
512 	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
513 	return (low | ((u_int64_t)high << 32));
514 }
515 
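/*
 * Example usage (illustrative sketch, not from the original source):
 * timing a code region with rdtsc().  rdtsc is not a serializing
 * instruction, so for fine-grained measurements a fence such as
 * cpu_mfence() is often placed around the region.  The function being
 * timed is hypothetical.
 */
static __inline u_int64_t
example_time_region(void (*func)(void))
{
	u_int64_t start, end;

	cpu_mfence();
	start = rdtsc();
	func();
	end = rdtsc();
	cpu_mfence();
	return (end - start);
}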
516 static __inline void
517 wbinvd(void)
518 {
519 	__asm __volatile("wbinvd");
520 }
521 
522 static __inline void
523 write_rflags(u_long rf)
524 {
525 	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
526 }
527 
528 static __inline void
529 wrmsr(u_int msr, u_int64_t newval)
530 {
531 	u_int32_t low, high;
532 
533 	low = newval;
534 	high = newval >> 32;
535 	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
536 }
537 
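/*
 * Example usage (illustrative sketch, not from the original source):
 * a read-modify-write of a model-specific register using rdmsr() and
 * wrmsr().  The EXAMPLE_* MSR number and bit are hypothetical; real MSR
 * layouts live in <machine/specialreg.h>.
 */
#define	EXAMPLE_MSR		0xc0000080	/* hypothetical MSR number */
#define	EXAMPLE_MSR_ENABLE	0x00000001	/* hypothetical enable bit */

static __inline void
example_msr_set_enable(void)
{
	u_int64_t val;

	val = rdmsr(EXAMPLE_MSR);
	val |= EXAMPLE_MSR_ENABLE;
	wrmsr(EXAMPLE_MSR, val);
}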
538 static __inline void
539 load_cr0(u_long data)
540 {
541 
542 	__asm __volatile("movq %0,%%cr0" : : "r" (data));
543 }
544 
545 static __inline u_long
546 rcr0(void)
547 {
548 	u_long	data;
549 
550 	__asm __volatile("movq %%cr0,%0" : "=r" (data));
551 	return (data);
552 }
553 
554 static __inline u_long
555 rcr2(void)
556 {
557 	u_long	data;
558 
559 	__asm __volatile("movq %%cr2,%0" : "=r" (data));
560 	return (data);
561 }
562 
563 static __inline void
564 load_cr3(u_long data)
565 {
566 
567 	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
568 }
569 
570 static __inline u_long
571 rcr3(void)
572 {
573 	u_long	data;
574 
575 	__asm __volatile("movq %%cr3,%0" : "=r" (data));
576 	return (data);
577 }
578 
579 static __inline void
580 load_cr4(u_long data)
581 {
582 	__asm __volatile("movq %0,%%cr4" : : "r" (data));
583 }
584 
585 static __inline u_long
586 rcr4(void)
587 {
588 	u_long	data;
589 
590 	__asm __volatile("movq %%cr4,%0" : "=r" (data));
591 	return (data);
592 }
593 
594 /*
595  * Global TLB flush (except for entries for pages marked PG_G)
596  */
597 static __inline void
598 cpu_invltlb(void)
599 {
600 
601 	load_cr3(rcr3());
602 }
603 
604 /*
605  * TLB flush for an individual page (even if it has PG_G).
606  * Only works on 486+ CPUs (i386 does not have PG_G).
607  */
608 static __inline void
609 invlpg(u_long addr)
610 {
611 
612 	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
613 }
614 
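/*
 * Example usage (illustrative sketch, not from the original source):
 * after a page-table entry is rewritten, the stale translation must be
 * flushed on this cpu with invlpg() (or the whole TLB with
 * cpu_invltlb()); on SMP the other cpus need smp_invltlb() as well.
 * The function name is hypothetical.
 */
static __inline void
example_remap_flush(u_long va)
{
	/* ... the page-table entry for 'va' was just rewritten ... */
	invlpg(va);		/* drop the stale TLB entry on this cpu */
}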
615 static __inline u_int
616 rfs(void)
617 {
618 	u_int sel;
619 	__asm __volatile("movl %%fs,%0" : "=rm" (sel));
620 	return (sel);
621 }
622 
623 static __inline u_int
624 rgs(void)
625 {
626 	u_int sel;
627 	__asm __volatile("movl %%gs,%0" : "=rm" (sel));
628 	return (sel);
629 }
630 
631 static __inline void
632 load_ds(u_int sel)
633 {
634 	__asm __volatile("movl %0,%%ds" : : "rm" (sel));
635 }
636 
637 static __inline void
638 load_es(u_int sel)
639 {
640 	__asm __volatile("movl %0,%%es" : : "rm" (sel));
641 }
642 
643 #ifdef _KERNEL
644 /* This is defined in <machine/specialreg.h> but is too painful to get to */
645 #ifndef	MSR_FSBASE
646 #define	MSR_FSBASE	0xc0000100
647 #endif
648 static __inline void
649 load_fs(u_int sel)
650 {
651 	/* Preserve the fsbase value across the selector load */
652         __asm __volatile("rdmsr; movl %0,%%fs; wrmsr"
653             : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
654 }
655 
656 #ifndef	MSR_GSBASE
657 #define	MSR_GSBASE	0xc0000101
658 #endif
659 static __inline void
660 load_gs(u_int sel)
661 {
662 	/*
663 	 * Preserve the gsbase value across the selector load.
664 	 * Note that we have to disable interrupts because the gsbase
665 	 * being trashed happens to be the kernel gsbase at the time.
666 	 */
667         __asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
668             : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
669 }
670 #else
671 /* Usable by userland */
672 static __inline void
673 load_fs(u_int sel)
674 {
675 	__asm __volatile("movl %0,%%fs" : : "rm" (sel));
676 }
677 
678 static __inline void
679 load_gs(u_int sel)
680 {
681 	__asm __volatile("movl %0,%%gs" : : "rm" (sel));
682 }
683 #endif
684 
685 /* void lidt(struct region_descriptor *addr); */
686 static __inline void
687 lidt(struct region_descriptor *addr)
688 {
689 	__asm __volatile("lidt (%0)" : : "r" (addr));
690 }
691 
692 /* void lldt(u_short sel); */
693 static __inline void
694 lldt(u_short sel)
695 {
696 	__asm __volatile("lldt %0" : : "r" (sel));
697 }
698 
699 /* void ltr(u_short sel); */
700 static __inline void
701 ltr(u_short sel)
702 {
703 	__asm __volatile("ltr %0" : : "r" (sel));
704 }
705 
706 static __inline u_int64_t
707 rdr0(void)
708 {
709 	u_int64_t data;
710 	__asm __volatile("movq %%dr0,%0" : "=r" (data));
711 	return (data);
712 }
713 
714 static __inline void
715 load_dr0(u_int64_t dr0)
716 {
717 	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
718 }
719 
720 static __inline u_int64_t
721 rdr1(void)
722 {
723 	u_int64_t data;
724 	__asm __volatile("movq %%dr1,%0" : "=r" (data));
725 	return (data);
726 }
727 
728 static __inline void
729 load_dr1(u_int64_t dr1)
730 {
731 	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
732 }
733 
734 static __inline u_int64_t
735 rdr2(void)
736 {
737 	u_int64_t data;
738 	__asm __volatile("movq %%dr2,%0" : "=r" (data));
739 	return (data);
740 }
741 
742 static __inline void
743 load_dr2(u_int64_t dr2)
744 {
745 	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
746 }
747 
748 static __inline u_int64_t
749 rdr3(void)
750 {
751 	u_int64_t data;
752 	__asm __volatile("movq %%dr3,%0" : "=r" (data));
753 	return (data);
754 }
755 
756 static __inline void
757 load_dr3(u_int64_t dr3)
758 {
759 	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
760 }
761 
762 static __inline u_int64_t
763 rdr4(void)
764 {
765 	u_int64_t data;
766 	__asm __volatile("movq %%dr4,%0" : "=r" (data));
767 	return (data);
768 }
769 
770 static __inline void
771 load_dr4(u_int64_t dr4)
772 {
773 	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
774 }
775 
776 static __inline u_int64_t
777 rdr5(void)
778 {
779 	u_int64_t data;
780 	__asm __volatile("movq %%dr5,%0" : "=r" (data));
781 	return (data);
782 }
783 
784 static __inline void
785 load_dr5(u_int64_t dr5)
786 {
787 	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
788 }
789 
790 static __inline u_int64_t
791 rdr6(void)
792 {
793 	u_int64_t data;
794 	__asm __volatile("movq %%dr6,%0" : "=r" (data));
795 	return (data);
796 }
797 
798 static __inline void
799 load_dr6(u_int64_t dr6)
800 {
801 	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
802 }
803 
804 static __inline u_int64_t
805 rdr7(void)
806 {
807 	u_int64_t data;
808 	__asm __volatile("movq %%dr7,%0" : "=r" (data));
809 	return (data);
810 }
811 
812 static __inline void
813 load_dr7(u_int64_t dr7)
814 {
815 	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
816 }
817 
818 static __inline register_t
819 intr_disable(void)
820 {
821 	register_t rflags;
822 
823 	rflags = read_rflags();
824 	cpu_disable_intr();
825 	return (rflags);
826 }
827 
828 static __inline void
829 intr_restore(register_t rflags)
830 {
831 	write_rflags(rflags);
832 }
833 
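/*
 * Example usage (illustrative sketch, not from the original source):
 * the usual save/disable/restore pattern for a short critical section
 * that must not be interrupted on the local cpu.  The work done inside
 * is hypothetical.
 */
static __inline void
example_critical_update(volatile u_int *counter)
{
	register_t rflags;

	rflags = intr_disable();	/* save RFLAGS and clear IF */
	*counter += 1;			/* ... interrupt-sensitive work ... */
	intr_restore(rflags);		/* restore RFLAGS (and IF) */
}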
834 #else /* !__GNUC__ */
835 
836 int	breakpoint(void);
837 void	cpu_pause(void);
838 u_int	bsfl(u_int mask);
839 u_int	bsrl(u_int mask);
840 void	cpu_disable_intr(void);
841 void	cpu_enable_intr(void);
842 void	cpu_invlpg(void *addr);
843 void	cpu_invlpg_range(u_long start, u_long end);
844 void	do_cpuid(u_int ax, u_int *p);
845 void	halt(void);
846 u_char	inb(u_int port);
847 u_int	inl(u_int port);
848 void	insb(u_int port, void *addr, size_t cnt);
849 void	insl(u_int port, void *addr, size_t cnt);
850 void	insw(u_int port, void *addr, size_t cnt);
851 void	invd(void);
852 void	invlpg(u_long addr);
853 void	invlpg_range(u_long start, u_long end);
854 void	cpu_invltlb(void);
855 u_short	inw(u_int port);
856 void	load_cr0(u_long cr0);
857 void	load_cr3(u_long cr3);
858 void	load_cr4(u_long cr4);
859 void	load_fs(u_int sel);
860 void	load_gs(u_int sel);
861 struct region_descriptor;
862 void	lidt(struct region_descriptor *addr);
863 void	lldt(u_short sel);
864 void	ltr(u_short sel);
865 void	outb(u_int port, u_char data);
866 void	outl(u_int port, u_int data);
867 void	outsb(u_int port, void *addr, size_t cnt);
868 void	outsl(u_int port, void *addr, size_t cnt);
869 void	outsw(u_int port, void *addr, size_t cnt);
870 void	outw(u_int port, u_short data);
871 void	ia32_pause(void);
872 u_long	rcr0(void);
873 u_long	rcr2(void);
874 u_long	rcr3(void);
875 u_long	rcr4(void);
876 u_int	rfs(void);
877 u_int	rgs(void);
878 u_int64_t rdmsr(u_int msr);
879 u_int64_t rdpmc(u_int pmc);
880 u_int64_t rdtsc(void);
881 u_long	read_rflags(void);
882 void	wbinvd(void);
883 void	write_rflags(u_long rf);
884 void	wrmsr(u_int msr, u_int64_t newval);
885 u_int64_t	rdr0(void);
886 void	load_dr0(u_int64_t dr0);
887 u_int64_t	rdr1(void);
888 void	load_dr1(u_int64_t dr1);
889 u_int64_t	rdr2(void);
890 void	load_dr2(u_int64_t dr2);
891 u_int64_t	rdr3(void);
892 void	load_dr3(u_int64_t dr3);
893 u_int64_t	rdr4(void);
894 void	load_dr4(u_int64_t dr4);
895 u_int64_t	rdr5(void);
896 void	load_dr5(u_int64_t dr5);
897 u_int64_t	rdr6(void);
898 void	load_dr6(u_int64_t dr6);
899 u_int64_t	rdr7(void);
900 void	load_dr7(u_int64_t dr7);
901 register_t	intr_disable(void);
902 void	intr_restore(register_t rf);
903 
904 #endif	/* __GNUC__ */
905 
906 void	reset_dbregs(void);
907 
908 __END_DECLS
909 
910 #endif /* !_CPU_CPUFUNC_H_ */
911