/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifdef __i386__
#include <i386/cpufunc.h>
#else /* !__i386__ */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

struct region_descriptor;

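/*
 * Raw memory accessors, mainly for memory-mapped device registers.
 * The volatile casts force the compiler to perform each access
 * exactly as written.
 */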
#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))

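/* Trap to the debugger with a software breakpoint (INT3). */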
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

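/*
 * Bit-scan-forward: index of the least significant set bit.  As with
 * the BSF instruction, the result is undefined for a zero mask.
 */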
#define	bsfl(mask)	__builtin_ctz(mask)

#define	bsfq(mask)	__builtin_ctzl(mask)

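/*
 * Cache-line maintenance.  clflush() writes back and evicts the line
 * containing addr; clflushopt() does the same with weaker ordering,
 * encoded here as a 66h-prefixed clflush so that older assemblers
 * need not know the mnemonic; clwb() writes the line back while
 * possibly keeping it cached.
 */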
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

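/*
 * Disable maskable interrupts on this CPU.  The "memory" clobber
 * also makes this a compiler barrier.
 */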
static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

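/*
 * Execute CPUID leaf ax, storing %eax, %ebx, %ecx and %edx into
 * p[0..3].  cpuid_count() additionally selects sub-leaf cx.
 */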
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

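/*
 * Programmed I/O: single and string (rep-prefixed) accesses to x86
 * I/O ports.  The "Nd" constraint lets a constant port number be
 * encoded as an immediate operand.
 */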
static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

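/* Count the set bits in mask with the POPCNT instruction. */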
static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

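/*
 * Load, full and store memory fences.  Each is also a compiler
 * barrier via the "memory" clobber.
 */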
static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

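/* Spin-loop hint; reduces power use and sibling-thread contention. */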
static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

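/*
 * Model-specific registers.  rdmsr() returns the full 64-bit
 * %edx:%eax value; rdmsr32() returns only the low 32 bits.
 */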
static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

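/*
 * Time-stamp counter reads.  Plain rdtsc is not ordered with respect
 * to surrounding instructions; the _ordered variants fence first so
 * the read cannot be hoisted above earlier accesses.  rdtscp waits
 * for all earlier instructions to complete and also reads the
 * TSC_AUX MSR, which rdtscp_aux() exposes via *aux.
 */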
static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

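/*
 * Control-register accessors: CR0 holds system control flags, CR2
 * the last page-fault address, CR3 the page-table base and CR4
 * feature enables.  Writing CR3 flushes non-global TLB entries,
 * hence the "memory" clobber on load_cr3().
 */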
static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

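/*
 * Extended control registers (XCR), read and written with xgetbv and
 * xsetbv; XCR0 governs which state components XSAVE manages.
 */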
static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

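/*
 * INVPCID invalidation types: a single address within a PCID, one
 * whole PCID, all contexts including global translations, and all
 * contexts excluding global translations.
 */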
#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}

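/* Read the current %fs, %gs and %ss segment selectors. */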
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

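/*
 * MONITOR arms address-range monitoring on addr; MWAIT then waits
 * until a write to the monitored range (or an interrupt), with the
 * hints selecting the idle state to enter.
 */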
static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

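/*
 * Access the PKRU protection-key rights register.  %ecx must be
 * zero for both instructions; rdpkru clobbers %edx.
 */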
static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

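/*
 * Direct FS/GS base accessors.  These fault unless the CPU supports
 * the FSGSBASE extension and CR4.FSGSBASE is set, so callers must
 * check for support first.
 */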
static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

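/*
 * Descriptor-table and task-register accessors: load and store the
 * GDT, IDT, LDT selector and task register.
 */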
static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

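/*
 * Debug registers: DR0-DR3 hold breakpoint linear addresses, DR6
 * reports debug status and DR7 controls the breakpoints.
 */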
static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

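/*
 * Save the interrupt state and disable interrupts; intr_restore()
 * puts the saved state back.  Typical use brackets a short critical
 * section:
 *
 *	saved = intr_disable();
 *	...
 *	intr_restore(saved);
 */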
static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}

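/*
 * Toggle RFLAGS.AC to temporarily permit (stac) or forbid (clac)
 * kernel access to user pages when SMAP is enabled.
 */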
static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}

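/*
 * Intel SGX: ENCLS leaf function numbers and enclave page types,
 * with inline wrappers around the sgx_encls() dispatcher.
 */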
enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS  = 0x01,
	SGX_PT_REG  = 0x02,
	SGX_PT_VA   = 0x03,
	SGX_PT_TRIM = 0x04,
};

int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */

#endif /* __i386__ */