/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

struct region_descriptor;

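/*
 * Fixed-width accessors for memory-mapped device registers; the
 * volatile casts force exactly one access of the named width.
 */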
#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))

#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

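/*
 * Note: bsf/bsr leave the result undefined when the input mask is 0;
 * callers must handle a zero argument themselves (as fls()/flsl() do).
 */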
static __inline __pure2 u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

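/*
 * clflushopt is clflush with a 0x66 prefix; the .byte encoding keeps
 * this buildable with assemblers that predate the mnemonic.
 */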
static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

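/*
 * Execute cpuid leaf ax (and subleaf cx for cpuid_count); results are
 * returned in p[0..3] as %eax, %ebx, %ecx and %edx respectively.
 */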
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)	__builtin_ffs(x)

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{
	return (ffsl((long)mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{
	return (flsl((long)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

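/*
 * Port (I/O-space) accessors.  The "Nd" constraint allows either an
 * immediate port number in the range 0-255 or a port in %dx.
 */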
static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

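/*
 * Load, store and full memory fences; the "memory" clobber makes each
 * one a compiler-level barrier as well.
 */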
static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

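/* MSRs are read and written as an %edx:%eax (high:low) pair. */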
static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	/* rdmsr still writes the high half to %edx, hence the clobber. */
	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	/* rdtscp also loads IA32_TSC_AUX into %ecx, hence the clobber. */
	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

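/*
 * Extended control registers are accessed with xgetbv/xsetbv; %ecx
 * selects the register (currently only XCR0, which governs the
 * XSAVE-managed state components).
 */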
static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for entries for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

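/*
 * INVPCID operation types (Intel SDM Vol. 3C):
 *	INVPCID_ADDR	- one linear address in one PCID
 *	INVPCID_CTX	- all non-global entries for one PCID
 *	INVPCID_CTXGLOB	- all entries, including global translations
 *	INVPCID_ALLCTX	- all entries except global translations
 */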
#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

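/*
 * monitor/mwait: arm the address-range monitor on addr, then idle
 * until another CPU writes to the monitored line (or an interrupt
 * arrives).
 */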
static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

/* Protection-key rights register; %ecx must be zero for both ops. */
static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif
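
/*
 * The {rd,wr}{fs,gs}base instructions require FSGSBASE support
 * (CR4.FSGSBASE set); they raise #UD otherwise.
 */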

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

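/*
 * Hardware debug registers: %dr0-%dr3 hold breakpoint addresses,
 * %dr6 is the debug status register and %dr7 the debug control
 * register.
 */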
static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
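
/*
 * Typical usage, saving and restoring the interrupt state around a
 * short critical section:
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(s);
 */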

/* Set RFLAGS.AC, overriding SMAP protection of user pages. */
static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

/* Clear RFLAGS.AC, re-enabling SMAP protection. */
static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}

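/* SGX ENCLS leaf function numbers (Intel SDM Vol. 3D). */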
enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

/* SGX enclave page types. */
enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS  = 0x01,
	SGX_PT_REG  = 0x02,
	SGX_PT_VA   = 0x03,
	SGX_PT_TRIM = 0x04,
};

int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}

#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */

void	breakpoint(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	clflush(u_long addr);
void	clts(void);
void	cpuid_count(u_int ax, u_int cx, u_int *p);
void	disable_intr(void);
void	do_cpuid(u_int ax, u_int *p);
void	enable_intr(void);
void	halt(void);
void	ia32_pause(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t count);
void	insl(u_int port, void *addr, size_t count);
void	insw(u_int port, void *addr, size_t count);
register_t	intr_disable(void);
void	intr_restore(register_t rf);
void	invd(void);
void	invlpg(u_long addr);
void	invltlb(void);
u_short	inw(u_int port);
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_dr0(uint64_t dr0);
void	load_dr1(uint64_t dr1);
void	load_dr2(uint64_t dr2);
void	load_dr3(uint64_t dr3);
void	load_dr6(uint64_t dr6);
void	load_dr7(uint64_t dr7);
void	load_fs(u_short sel);
void	load_gs(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, const void *addr, size_t count);
void	outsl(u_int port, const void *addr, size_t count);
void	outsw(u_int port, const void *addr, size_t count);
void	outw(u_int port, u_short data);
u_long	rcr0(void);
u_long	rcr2(void);
u_long	rcr3(void);
u_long	rcr4(void);
uint64_t rdmsr(u_int msr);
uint32_t rdmsr32(u_int msr);
uint64_t rdpmc(u_int pmc);
uint64_t rdr0(void);
uint64_t rdr1(void);
uint64_t rdr2(void);
uint64_t rdr3(void);
uint64_t rdr6(void);
uint64_t rdr7(void);
uint64_t rdtsc(void);
u_long	read_rflags(void);
u_short	rfs(void);
u_short	rgs(void);
void	wbinvd(void);
void	write_rflags(u_long rf);
void	wrmsr(u_int msr, uint64_t newval);

#endif	/* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */