/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special x86_64 instructions.
 * This file is included by <sys/systm.h>, which should be used
 * in preference to including this file directly.
 */

#ifndef _CPU_CPUFUNC_H_
#define	_CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <sys/thread.h>
#include <machine/psl.h>
#include <machine/smp.h>

struct thread;
struct region_descriptor;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))

#ifdef	__GNUC__

#include <machine/lock.h>		/* XXX */

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause":::"memory");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsflong(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{
	__asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}
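
/*
 * Illustrative sketch only (kept under #if 0 so it is never compiled):
 * fetching the 12-byte CPU vendor string with do_cpuid().  Leaf 0 returns
 * the string in %ebx/%edx/%ecx, in that order.  bcopy() is assumed to be
 * available in the caller's environment.
 */
#if 0
	u_int regs[4];
	char vendor[13];

	do_cpuid(0, regs);
	bcopy(&regs[1], vendor + 0, 4);		/* %ebx */
	bcopy(&regs[3], vendor + 4, 4);		/* %edx */
	bcopy(&regs[2], vendor + 8, 4);		/* %ecx */
	vendor[12] = '\0';			/* e.g. "GenuineIntel" */
#endif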

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * CPU and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
	__asm __volatile("mfence" : : : "memory");
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
	__asm __volatile("lfence" : : : "memory");
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * already ordered on Intel CPUs, so we do not actually have to do
 * anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current CPU.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the CPU.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
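
/*
 * Illustrative sketch only (kept under #if 0 so it is never compiled):
 * how the fences above pair up in a simple producer/consumer handoff.
 * 'shared_data' and 'shared_flag' are hypothetical shared variables,
 * not part of this header.
 */
#if 0
	/*
	 * Producer: publish the data, then raise the flag.  cpu_sfence()
	 * (a compiler barrier, see above) keeps the two stores ordered.
	 */
	shared_data = value;
	cpu_sfence();
	shared_flag = 1;

	/*
	 * Consumer: see the flag, then read the data.  cpu_lfence()
	 * keeps the two loads ordered.
	 */
	while (shared_flag == 0)
		cpu_pause();
	cpu_lfence();
	value = shared_data;
#endif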

/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case-by-case basis), just before they
 * return, to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur both on a Phenom II X4 820 (two of them) and on a
 * 48-core machine built around an Opteron 6168 (Id = 0x100f91  Stepping = 1).
 * The problem does not appear to occur with Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the AMD cpu bug (with gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
	__asm __volatile("nop" : : : "memory");
}
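
/*
 * Illustrative placement only (kept under #if 0 so it is never compiled):
 * the workaround goes at the very end of a deeply recursive routine, just
 * before it returns.  'struct node' and deep_recursion() are hypothetical
 * stand-ins for something like GCC's fill_sons_in_loop().
 */
#if 0
static void
deep_recursion(struct node *np)
{
	if (np->left)
		deep_recursion(np->left);
	if (np->right)
		deep_recursion(np->right);
	np->visited = 1;
	cpu_amdcpubug_dfly01();		/* work around the suspected cpu bug */
}
#endif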

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
	/* Actually, the above is way out of date.  The builtins use cmov etc */
	return (__builtin_ffs(mask));
#endif
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline int
flsll(long long mask)
{
	return (flsl((long)mask));
}
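
/*
 * A few illustrative values for the bit-scan wrappers above (kept under
 * #if 0 so they are never compiled).  Bit numbering is 1-based and a zero
 * argument yields 0; KKASSERT() is used here purely for illustration.
 */
#if 0
	KKASSERT(ffs(0x00000000) == 0);
	KKASSERT(ffs(0x00000018) == 4);		/* lowest set bit is bit 3 */
	KKASSERT(fls(0x00000018) == 5);		/* highest set bit is bit 4 */
	KKASSERT(ffsl(1UL << 40) == 41);
	KKASSERT(flsl(0) == 0);
#endif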

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100		\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))
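
/*
 * Illustrative sketch only (kept under #if 0 so it is never compiled):
 * how the macros above dispatch.  A compile-time constant port uses the
 * inbc()/outbc() forms, where the port becomes an immediate operand; a
 * run-time port falls back to inbv()/outbv() through %dx.  The port
 * values and 'iobase' below are hypothetical.
 */
#if 0
	u_char status = inb(0x64);		/* constant port -> inbc() */
	u_int port = iobase + 5;
	u_char lsr = inb(port);			/* variable port -> inbv() */
#endif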

static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

void smp_invltlb(void);
void smp_invltlb_intr(void);

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 *
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif
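
/*
 * Minimal sketch (kept under #if 0 so it is never compiled) of invalidating
 * a VA range on the local cpu one page at a time with cpu_invlpg().
 * PAGE_SIZE/PAGE_MASK are assumed to come from <machine/param.h> and
 * sva/eva are the (hypothetical) bounds of the range.
 */
#if 0
	vm_offset_t va;

	for (va = sva & ~(vm_offset_t)PAGE_MASK; va < eva; va += PAGE_SIZE)
		cpu_invlpg((void *)va);
#endif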

#if defined(_KERNEL)
struct smp_invlpg_range_cpusync_arg {
	vm_offset_t sva;
	vm_offset_t eva;
};

void
smp_invlpg_range_cpusync(void *arg);

static __inline void
smp_invlpg_range(cpumask_t mask, vm_offset_t sva, vm_offset_t eva)
{
	struct smp_invlpg_range_cpusync_arg arg;

	arg.sva = sva;
	arg.eva = eva;
	lwkt_cpusync_simple(mask, smp_invlpg_range_cpusync, &arg);
}
#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int   result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			: "=&r" (result) : "m" (*addr));
	return (result);
}
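
/*
 * Illustrative use of loadandclear() (kept under #if 0 so it is never
 * compiled): atomically drain a word of pending-event bits and handle
 * them one at a time.  'pending' is a hypothetical shared word.
 */
#if 0
	static volatile u_int pending;
	u_int events;
	int bit;

	events = loadandclear(&pending);	/* take ownership of all bits */
	while (events) {
		bit = bsfl(events);
		events &= ~(1U << bit);
		/* ... handle event 'bit' ... */
	}
#endif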

static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((u_int64_t)high << 32));
}

#ifdef _KERNEL
#include <machine/cputypes.h>
#include <machine/md_var.h>

static __inline u_int64_t
rdtsc_ordered(void)
{
	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		cpu_lfence();
	else
		cpu_mfence();
	return rdtsc();
}
#endif
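
/*
 * Sketch of a cycle-count measurement (kept under #if 0 so it is never
 * compiled).  rdtsc_ordered() is kernel-only and keeps the reads from being
 * reordered around the measured work; do_work() is a hypothetical workload.
 */
#if 0
	u_int64_t t0, t1;

	t0 = rdtsc_ordered();
	do_work();
	t1 = rdtsc_ordered();
	/* t1 - t0 is elapsed TSC reference cycles, not wall-clock time */
#endif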

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

#if defined(_KERNEL)
void cpu_wbinvd_on_all_cpus_callback(void *arg);

static __inline void
cpu_wbinvd_on_all_cpus(void)
{
	lwkt_cpusync_simple(smp_active_mask, cpu_wbinvd_on_all_cpus_callback, NULL);
}
#endif

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
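
/*
 * Generic read-modify-write pattern for an MSR using rdmsr()/wrmsr()
 * (kept under #if 0 so it is never compiled).  MSR_EXAMPLE and
 * EXAMPLE_ENABLE_BIT are placeholders, not real register definitions.
 */
#if 0
#define	MSR_EXAMPLE		0x12345678	/* placeholder MSR number */
#define	EXAMPLE_ENABLE_BIT	0x0000000000000001ULL

	u_int64_t val;

	val = rdmsr(MSR_EXAMPLE);
	val |= EXAMPLE_ENABLE_BIT;
	wrmsr(MSR_EXAMPLE, val);
#endif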

static __inline void
xsetbv(u_int ecx, u_int eax, u_int edx)
{
	__asm __volatile(".byte 0x0f,0x01,0xd1"
	    :
	    : "a" (eax), "c" (ecx), "d" (edx));
}

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
            : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
            : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
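
/*
 * Typical save/disable/restore bracket built from the two helpers above
 * (kept under #if 0 so it is never compiled).
 */
#if 0
	register_t saved;

	saved = intr_disable();		/* returns the prior %rflags, IF cleared */
	/* ... short section that must not be interrupted on this cpu ... */
	intr_restore(saved);		/* restores the original interrupt state */
#endif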

#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(u_long addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg_range(u_int start, u_int end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_int cr0);
void	load_cr3(u_int cr3);
void	load_cr4(u_int cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
struct region_descriptor;
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, void *addr, size_t cnt);
void	outsl(u_int port, void *addr, size_t cnt);
void	outsw(u_int port, void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_int	rcr0(void);
u_int	rcr2(void);
u_int	rcr3(void);
u_int	rcr4(void);
u_short	rfs(void);
u_short	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
u_int64_t rdtsc(void);
u_int	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_int rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t	rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t	rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t	rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t	rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t	rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t	rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t	rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t	rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t	intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
void	reset_dbregs(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */