/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */
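
/*
 * Example usage (an illustrative sketch, not part of this header): kernel
 * code typically calls these wrappers instead of writing inline assembly,
 * e.g. to read an MSR with interrupts temporarily disabled ("msr" below is
 * a hypothetical MSR number supplied by the caller):
 *
 *	register_t saved;
 *	uint64_t val;
 *
 *	saved = intr_disable();
 *	val = rdmsr(msr);
 *	intr_restore(saved);
 */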

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

struct region_descriptor;

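/*
 * Raw accessors for memory-mapped I/O.  The casts go through volatile
 * pointers so the compiler performs exactly one access of the requested
 * width and does not cache or elide it.
 */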
#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline __pure2 u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm("bsfl %1,%0" : "=r" (result) : "rm" (mask) : "cc");
	return (result);
}

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#ifdef _KERNEL
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}
#else
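/*
 * Outside the kernel the code may be compiled position-independent, where
 * %ebx holds the GOT pointer, so preserve %ebx around cpuid by hand instead
 * of naming it as an output.
 */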
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile(
	    "pushl\t%%ebx\n\t"
	    "cpuid\n\t"
	    "movl\t%%ebx,%1\n\t"
	    "popl\t%%ebx"
	    : "=a" (p[0]), "=DS" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile(
	    "pushl\t%%ebx\n\t"
	    "cpuid\n\t"
	    "movl\t%%ebx,%1\n\t"
	    "popl\t%%ebx"
	    : "=a" (p[0]), "=DS" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}
#endif

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

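/*
 * monitor/mwait: "monitor" arms address-range monitoring on the linear
 * address in %eax (with extensions in %ecx and hints in %edx); "mwait"
 * then waits until a store to the monitored range or another wakeup event.
 */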
static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{
	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{
	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

static __inline void
lfence(void)
{
	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{
	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{
	__asm __volatile("sfence" : : : "memory");
}

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

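/*
 * String I/O: "rep ins"/"rep outs" move %ecx items between the port in %dx
 * and memory at %es:%edi (input) or %ds:%esi (output); "cld" makes the
 * pointers advance upward.
 */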
static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_int
read_eflags(void)
{
	u_int	ef;

	__asm __volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

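/*
 * The "A" constraint binds a 64-bit operand to the %edx:%eax register pair
 * on i386, which is exactly where rdmsr, rdpmc and rdtsc leave their
 * results.
 */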
static __inline uint64_t
rdmsr(u_int msr)
{
	uint64_t rv;

	__asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
	return (rv);
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "edx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint64_t rv;

	__asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc));
	return (rv);
}

static __inline uint64_t
rdtsc(void)
{
	uint64_t rv;

	__asm __volatile("rdtsc" : "=A" (rv));
	return (rv);
}

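/*
 * rdtsc is not a serializing instruction and may execute ahead of earlier
 * loads and stores.  Issuing lfence (or, more conservatively, mfence on
 * CPUs where lfence is not dispatch-serializing) first keeps the timestamp
 * from being taken too early.
 */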
static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

static __inline uint64_t
rdtscp(void)
{
	uint64_t rv;

	__asm __volatile("rdtscp" : "=A" (rv) : : "ecx");
	return (rv);
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint64_t rv;

	__asm __volatile("rdtscp" : "=A" (rv), "=c" (*aux));
	return (rv);
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_eflags(u_int ef)
{
	__asm __volatile("pushl %0; popfl" : : "r" (ef));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	__asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
}

static __inline void
load_cr0(u_int data)
{

	__asm __volatile("movl %0,%%cr0" : : "r" (data));
}

static __inline u_int
rcr0(void)
{
	u_int	data;

	__asm __volatile("movl %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_int
rcr2(void)
{
	u_int	data;

	__asm __volatile("movl %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_int data)
{

	__asm __volatile("movl %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_int
rcr3(void)
{
	u_int	data;

	__asm __volatile("movl %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_int data)
{
	__asm __volatile("movl %0,%%cr4" : : "r" (data));
}

static __inline u_int
rcr4(void)
{
	u_int	data;

	__asm __volatile("movl %%cr4,%0" : "=r" (data));
	return (data);
}

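/*
 * xgetbv/xsetbv access the extended control register selected by %ecx
 * (XCR0 controls which extended states XSAVE manages), with the 64-bit
 * value split across %edx:%eax.
 */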
static __inline uint64_t
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, uint64_t val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_int addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

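/*
 * sgdt/sidt store a 6-byte pseudo-descriptor (16-bit limit, 32-bit base)
 * in 32-bit mode, so only the low 48 bits of the returned value are
 * meaningful.
 */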
static __inline uint64_t
rgdt(void)
{
	uint64_t gdtr;
	__asm __volatile("sgdt %0" : "=m" (gdtr));
	return (gdtr);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline uint64_t
ridt(void)
{
	uint64_t idtr;
	__asm __volatile("sidt %0" : "=m" (idtr));
	return (idtr);
}

static __inline u_short
rldt(void)
{
	u_short ldtr;
	__asm __volatile("sldt %0" : "=g" (ldtr));
	return (ldtr);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rtr(void)
{
	u_short tr;
	__asm __volatile("str %0" : "=g" (tr));
	return (tr);
}

static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int
rdr0(void)
{
	u_int	data;
	__asm __volatile("movl %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int dr0)
{
	__asm __volatile("movl %0,%%dr0" : : "r" (dr0));
}

static __inline u_int
rdr1(void)
{
	u_int	data;
	__asm __volatile("movl %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int dr1)
{
	__asm __volatile("movl %0,%%dr1" : : "r" (dr1));
}

static __inline u_int
rdr2(void)
{
	u_int	data;
	__asm __volatile("movl %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int dr2)
{
	__asm __volatile("movl %0,%%dr2" : : "r" (dr2));
}

static __inline u_int
rdr3(void)
{
	u_int	data;
	__asm __volatile("movl %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int dr3)
{
	__asm __volatile("movl %0,%%dr3" : : "r" (dr3));
}

static __inline u_int
rdr6(void)
{
	u_int	data;
	__asm __volatile("movl %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int dr6)
{
	__asm __volatile("movl %0,%%dr6" : : "r" (dr6));
}

static __inline u_int
rdr7(void)
{
	u_int	data;
	__asm __volatile("movl %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int dr7)
{
	__asm __volatile("movl %0,%%dr7" : : "r" (dr7));
}

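/*
 * Cyrix configuration registers are reached through an index/data pair:
 * write the register index to I/O port 0x22, then read or write the value
 * at port 0x23.
 */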
static __inline u_char
read_cyrix_reg(u_char reg)
{
	outb(0x22, reg);
	return inb(0x23);
}

static __inline void
write_cyrix_reg(u_char reg, u_char data)
{
	outb(0x22, reg);
	outb(0x23, data);
}

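/*
 * intr_disable() returns the previous EFLAGS (including the interrupt
 * flag) before executing "cli", so a later intr_restore() re-enables
 * interrupts only if they were enabled to begin with; this makes the pair
 * safe to nest.
 */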
static __inline register_t
intr_disable(void)
{
	register_t eflags;

	eflags = read_eflags();
	disable_intr();
	return (eflags);
}

static __inline void
intr_restore(register_t eflags)
{
	write_eflags(eflags);
}

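/*
 * rdpkru/wrpkru access the protection-key rights register; both require
 * %ecx == 0, and wrpkru additionally requires %edx == 0.
 */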
static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

void    reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */