/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this one.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

struct region_descriptor;

#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))

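/*
 * Usage sketch (illustrative only, not part of the original header):
 * the read*()/write*() macros perform single volatile loads and stores
 * through a kernel virtual address that already maps the device, for
 * example one obtained from pmap_mapdev(); "va" below stands for such
 * a hypothetical mapping.
 *
 *	uint32_t status;
 *
 *	status = readl(va);		(read a 32-bit device register)
 *	writel(va + 4, 0x1);		(write the next 32-bit register)
 */
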
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline __pure2 u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm("bsfl %1,%0" : "=r" (result) : "rm" (mask) : "cc");
	return (result);
}

static __inline __pure2 u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm("bsrl %1,%0" : "=r" (result) : "rm" (mask) : "cc");
	return (result);
}

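/*
 * Usage sketch (illustrative): bsfl()/bsrl() return the index of the
 * lowest/highest set bit.  The result is undefined when the mask is 0,
 * so callers must test for that case themselves:
 *
 *	u_int bit;
 *
 *	if (mask != 0)
 *		bit = bsfl(mask);	(bsfl(0x18) == 3, bsrl(0x18) == 4)
 */
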
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	/*
	 * clflushopt is encoded as clflush with a 0x66 operand-size
	 * prefix; the .byte form keeps the code acceptable to older
	 * assemblers that do not know the mnemonic.
	 */
	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#ifdef _KERNEL
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}
#else
/*
 * In userland, %ebx may hold the PIC GOT pointer, so it is saved and
 * restored around cpuid instead of being named as an asm output.
 */
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile(
	    "pushl\t%%ebx\n\t"
	    "cpuid\n\t"
	    "movl\t%%ebx,%1\n\t"
	    "popl\t%%ebx"
	    : "=a" (p[0]), "=DS" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile(
	    "pushl\t%%ebx\n\t"
	    "cpuid\n\t"
	    "movl\t%%ebx,%1\n\t"
	    "popl\t%%ebx"
	    : "=a" (p[0]), "=DS" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}
#endif

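/*
 * Usage sketch (illustrative): query CPUID leaf 1 and test a feature
 * bit.  p[0]..p[3] receive %eax, %ebx, %ecx and %edx in that order;
 * CPUID_SSE2 is the usual %edx bit definition from
 * <machine/specialreg.h>.
 *
 *	u_int regs[4];
 *
 *	do_cpuid(1, regs);
 *	if (regs[3] & CPUID_SSE2)
 *		(SSE2 is supported)
 */
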
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{
	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{
	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

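/*
 * Usage sketch (illustrative): cpu_monitor() arms address monitoring
 * on a cache line and cpu_mwait() then waits until that line is
 * written (or an interrupt arrives).  The flag word and its value
 * below are hypothetical; the caller re-checks the condition after
 * arming the monitor to close the wakeup race:
 *
 *	cpu_monitor(&idle_flag, 0, 0);
 *	if (idle_flag == STILL_IDLE)
 *		cpu_mwait(0, 0);
 */
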
static __inline void
lfence(void)
{
	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{
	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{
	__asm __volatile("sfence" : : : "memory");
}

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

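/*
 * Usage sketch (illustrative): the in*()/out*() routines access x86
 * I/O ports directly.  For example, reading CMOS/RTC register 0 (the
 * seconds counter) through the standard index/data port pair
 * 0x70/0x71:
 *
 *	u_char sec;
 *
 *	outb(0x70, 0x00);	(select CMOS register 0)
 *	sec = inb(0x71);	(read its contents)
 */
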
static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_int
read_eflags(void)
{
	u_int	ef;

	__asm __volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint64_t rv;

	__asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
	return (rv);
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "edx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint64_t rv;

	__asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc));
	return (rv);
}

static __inline uint64_t
rdtsc(void)
{
	uint64_t rv;

	__asm __volatile("rdtsc" : "=A" (rv));
	return (rv);
}

static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

static __inline uint64_t
rdtscp(void)
{
	uint64_t rv;

	__asm __volatile("rdtscp" : "=A" (rv) : : "ecx");
	return (rv);
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint64_t rv;

	__asm __volatile("rdtscp" : "=A" (rv), "=c" (*aux));
	return (rv);
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

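/*
 * Usage sketch (illustrative): rdtsc() by itself is not ordered with
 * respect to earlier instructions, so the *_ordered_* variants (or
 * rdtscp()) are used when the timestamp must not be taken early.
 * A simple cycle measurement:
 *
 *	uint64_t t0, t1, delta;
 *
 *	t0 = rdtsc_ordered_lfence();
 *	(code being timed)
 *	t1 = rdtsc_ordered_lfence();
 *	delta = t1 - t0;
 */
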
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_eflags(u_int ef)
{
	__asm __volatile("pushl %0; popfl" : : "r" (ef));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	__asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
}

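/*
 * Usage sketch (illustrative): rdmsr()/wrmsr() access model-specific
 * registers by number, e.g. MSR_APICBASE (IA32_APIC_BASE, 0x1b) from
 * <machine/specialreg.h>.  Accessing an MSR the CPU does not implement
 * raises #GP, so the rdmsr_safe()/wrmsr_safe() declarations at the end
 * of this header are preferred for probing:
 *
 *	uint64_t base;
 *
 *	base = rdmsr(MSR_APICBASE);
 *	wrmsr(MSR_APICBASE, base);	(write it back unchanged)
 */
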
static __inline void
load_cr0(u_int data)
{

	__asm __volatile("movl %0,%%cr0" : : "r" (data));
}

static __inline u_int
rcr0(void)
{
	u_int	data;

	__asm __volatile("movl %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_int
rcr2(void)
{
	u_int	data;

	__asm __volatile("movl %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_int data)
{

	__asm __volatile("movl %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_int
rcr3(void)
{
	u_int	data;

	__asm __volatile("movl %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_int data)
{
	__asm __volatile("movl %0,%%cr4" : : "r" (data));
}

static __inline u_int
rcr4(void)
{
	u_int	data;

	__asm __volatile("movl %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline uint64_t
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, uint64_t val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

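/*
 * Usage sketch (illustrative): register 0 is XCR0, the extended
 * control register that selects which state components XSAVE manages;
 * it may only be accessed once CR4.OSXSAVE has been enabled.
 *
 *	uint64_t xcr0;
 *
 *	xcr0 = rxcr(0);
 *	load_xcr(0, xcr0 | 0x4);	(bit 2 enables the AVX state)
 */
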
/*
 * Global TLB flush (except for entries for pages marked PG_G).
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_int addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

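/*
 * Usage sketch (illustrative): after a page-table entry is changed,
 * the stale translation must be flushed, either for the single page
 * or for the whole TLB:
 *
 *	invlpg(va);	(flush one mapping; 486+ only)
 *	invltlb();	(or reload %cr3 to flush all non-PG_G entries)
 */
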
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline uint64_t
rgdt(void)
{
	uint64_t gdtr;
	__asm __volatile("sgdt %0" : "=m" (gdtr));
	return (gdtr);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline uint64_t
ridt(void)
{
	uint64_t idtr;
	__asm __volatile("sidt %0" : "=m" (idtr));
	return (idtr);
}

static __inline u_short
rldt(void)
{
	u_short ldtr;
	__asm __volatile("sldt %0" : "=g" (ldtr));
	return (ldtr);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rtr(void)
{
	u_short tr;
	__asm __volatile("str %0" : "=g" (tr));
	return (tr);
}

static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

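/*
 * Usage sketch (illustrative): lidt() takes a pointer to a
 * struct region_descriptor (limit and base, see <machine/segments.h>).
 * "idt" below stands for a hypothetical descriptor table:
 *
 *	struct region_descriptor r_idt;
 *
 *	r_idt.rd_limit = sizeof(idt) - 1;
 *	r_idt.rd_base = (int)idt;
 *	lidt(&r_idt);
 */
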
static __inline u_int
rdr0(void)
{
	u_int	data;
	__asm __volatile("movl %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int dr0)
{
	__asm __volatile("movl %0,%%dr0" : : "r" (dr0));
}

static __inline u_int
rdr1(void)
{
	u_int	data;
	__asm __volatile("movl %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int dr1)
{
	__asm __volatile("movl %0,%%dr1" : : "r" (dr1));
}

static __inline u_int
rdr2(void)
{
	u_int	data;
	__asm __volatile("movl %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int dr2)
{
	__asm __volatile("movl %0,%%dr2" : : "r" (dr2));
}

static __inline u_int
rdr3(void)
{
	u_int	data;
	__asm __volatile("movl %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int dr3)
{
	__asm __volatile("movl %0,%%dr3" : : "r" (dr3));
}

static __inline u_int
rdr6(void)
{
	u_int	data;
	__asm __volatile("movl %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int dr6)
{
	__asm __volatile("movl %0,%%dr6" : : "r" (dr6));
}

static __inline u_int
rdr7(void)
{
	u_int	data;
	__asm __volatile("movl %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int dr7)
{
	__asm __volatile("movl %0,%%dr7" : : "r" (dr7));
}

/*
 * The Cyrix configuration registers are reached through the
 * index/data port pair 0x22/0x23.
 */
static __inline u_char
read_cyrix_reg(u_char reg)
{
	outb(0x22, reg);
	return inb(0x23);
}

static __inline void
write_cyrix_reg(u_char reg, u_char data)
{
	outb(0x22, reg);
	outb(0x23, data);
}

static __inline register_t
intr_disable(void)
{
	register_t eflags;

	eflags = read_eflags();
	disable_intr();
	return (eflags);
}

static __inline void
intr_restore(register_t eflags)
{
	write_eflags(eflags);
}

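/*
 * Usage sketch (illustrative): intr_disable()/intr_restore() bracket a
 * short critical section while preserving the caller's previous
 * interrupt-enable state:
 *
 *	register_t saved;
 *
 *	saved = intr_disable();
 *	(work that must not be interrupted)
 *	intr_restore(saved);
 */
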
static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

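/*
 * Usage sketch (illustrative): PKRU holds two bits per protection key,
 * access-disable (bit 2*key) and write-disable (bit 2*key + 1).
 * Clearing both bits for key 1 grants full access for that key:
 *
 *	uint32_t pkru;
 *
 *	pkru = rdpkru();
 *	wrpkru(pkru & ~(0x3 << (1 * 2)));
 */
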
void    reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */