/*	$NetBSD: cpufunc.h,v 1.42 2020/10/24 07:14:29 mgorny Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_CPUFUNC_H_
#define	_X86_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif

static inline void
x86_pause(void)
{
	__asm volatile ("pause");
}
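
/*
 * Example (illustrative only): x86_pause() is meant for the body of
 * spin-wait loops, where the PAUSE hint relaxes the pipeline and the
 * sibling hyperthread.  flag_is_set() below is a hypothetical predicate.
 *
 *	while (!flag_is_set())
 *		x86_pause();
 */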

void	x86_lfence(void);
void	x86_sfence(void);
void	x86_mfence(void);
void	x86_flush(void);
void	x86_hlt(void);
void	x86_stihlt(void);
void	tlbflush(void);
void	tlbflushg(void);
void	invlpg(vaddr_t);
void	wbinvd(void);
void	breakpoint(void);

#define INVPCID_ADDRESS		0
#define INVPCID_CONTEXT		1
#define INVPCID_ALL		2
#define INVPCID_ALL_NONGLOBAL	3

static inline void
invpcid(register_t op, uint64_t pcid, vaddr_t va)
{
	struct {
		uint64_t pcid;
		uint64_t addr;
	} desc = {
		.pcid = pcid,
		.addr = va
	};

	__asm volatile (
		"invpcid %[desc],%[op]"
		:
		: [desc] "m" (desc), [op] "r" (op)
		: "memory"
	);
}
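
/*
 * Example (illustrative only): invalidating the TLB entry for a single
 * virtual address tagged with a given PCID, leaving global entries alone.
 * The PCID value 1 is an arbitrary example.
 *
 *	invpcid(INVPCID_ADDRESS, 1, va);
 */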

extern uint64_t (*rdtsc)(void);

#define _SERIALIZE_lfence	__asm volatile ("lfence")
#define _SERIALIZE_mfence	__asm volatile ("mfence")
#define _SERIALIZE_cpuid	__asm volatile ("xor %%eax, %%eax;cpuid" ::: \
	    "eax", "ebx", "ecx", "edx");

#define RDTSCFUNC(fence)			\
static inline uint64_t				\
rdtsc_##fence(void)				\
{						\
	uint32_t low, high;			\
						\
	_SERIALIZE_##fence;			\
	__asm volatile (			\
		"rdtsc"				\
		: "=a" (low), "=d" (high)	\
		:				\
	);					\
						\
	return (low | ((uint64_t)high << 32));	\
}

RDTSCFUNC(lfence)
RDTSCFUNC(mfence)
RDTSCFUNC(cpuid)

#undef _SERIALIZE_lfence
#undef _SERIALIZE_mfence
#undef _SERIALIZE_cpuid
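
/*
 * Example (illustrative only): the rdtsc function pointer is expected to
 * be set by platform code to whichever serialized variant suits the CPU;
 * callers simply read the counter around the region of interest.
 *
 *	uint64_t t0, t1;
 *
 *	t0 = rdtsc();
 *	... code being measured ...
 *	t1 = rdtsc();
 *	cycles = t1 - t0;
 */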


#ifndef XENPV
struct x86_hotpatch_source {
	uint8_t *saddr;
	uint8_t *eaddr;
};

struct x86_hotpatch_descriptor {
	uint8_t name;
	uint8_t nsrc;
	const struct x86_hotpatch_source *srcs[];
};

void	x86_hotpatch(uint8_t, uint8_t);
void	x86_patch(bool);
#endif

void	x86_monitor(const void *, uint32_t, uint32_t);
void	x86_mwait(uint32_t, uint32_t);

static inline void
x86_cpuid2(uint32_t eax, uint32_t ecx, uint32_t *regs)
{
	uint32_t ebx, edx;

	__asm volatile (
		"cpuid"
		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		: "a" (eax), "c" (ecx)
	);

	regs[0] = eax;
	regs[1] = ebx;
	regs[2] = ecx;
	regs[3] = edx;
}
#define x86_cpuid(a,b)	x86_cpuid2((a), 0, (b))
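
/*
 * Example (illustrative only): querying basic leaf 0.  EAX (regs[0]) holds
 * the highest supported basic leaf; EBX/EDX/ECX (regs[1], regs[3], regs[2])
 * hold the 12-byte vendor string.
 *
 *	uint32_t regs[4];
 *
 *	x86_cpuid(0, regs);
 */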

/* -------------------------------------------------------------------------- */

void	lidt(struct region_descriptor *);
void	lldt(u_short);
void	ltr(u_short);

static inline uint16_t
x86_getss(void)
{
	uint16_t val;

	__asm volatile (
		"mov	%%ss,%[val]"
		: [val] "=r" (val)
		:
	);
	return val;
}

static inline void
setds(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%ds"
		:
		: [val] "r" (val)
	);
}

static inline void
setes(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%es"
		:
		: [val] "r" (val)
	);
}

static inline void
setfs(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%fs"
		:
		: [val] "r" (val)
	);
}

void	setusergs(int);

/* -------------------------------------------------------------------------- */

#define FUNC_CR(crnum)					\
	static inline void lcr##crnum(register_t val)	\
	{						\
		__asm volatile (			\
			"mov	%[val],%%cr" #crnum	\
			:				\
			: [val] "r" (val)		\
			: "memory"			\
		);					\
	}						\
	static inline register_t rcr##crnum(void)	\
	{						\
		register_t val;				\
		__asm volatile (			\
			"mov	%%cr" #crnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_CR(crnum)					\
	void lcr##crnum(register_t);			\
	register_t rcr##crnum(void);

#ifndef XENPV
FUNC_CR(0)
FUNC_CR(2)
FUNC_CR(3)
#else
PROTO_CR(0)
PROTO_CR(2)
PROTO_CR(3)
#endif

FUNC_CR(4)
FUNC_CR(8)
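
/*
 * Example (illustrative only): a read-modify-write of %cr4, assuming the
 * CR4_* bit definitions from specialreg.h.
 *
 *	register_t cr4 = rcr4();
 *
 *	lcr4(cr4 | CR4_OSXSAVE);
 */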

/* -------------------------------------------------------------------------- */

#define FUNC_DR(drnum)					\
	static inline void ldr##drnum(register_t val)	\
	{						\
		__asm volatile (			\
			"mov	%[val],%%dr" #drnum	\
			:				\
			: [val] "r" (val)		\
		);					\
	}						\
	static inline register_t rdr##drnum(void)	\
	{						\
		register_t val;				\
		__asm volatile (			\
			"mov	%%dr" #drnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_DR(drnum)					\
	register_t rdr##drnum(void);			\
	void ldr##drnum(register_t);

#ifndef XENPV
FUNC_DR(0)
FUNC_DR(1)
FUNC_DR(2)
FUNC_DR(3)
FUNC_DR(6)
FUNC_DR(7)
#else
PROTO_DR(0)
PROTO_DR(1)
PROTO_DR(2)
PROTO_DR(3)
PROTO_DR(6)
PROTO_DR(7)
#endif

/* -------------------------------------------------------------------------- */

union savefpu;

static inline void
fninit(void)
{
	__asm volatile ("fninit" ::: "memory");
}

static inline void
fnclex(void)
{
	__asm volatile ("fnclex");
}

static inline void
fnstcw(uint16_t *val)
{
	__asm volatile (
		"fnstcw	%[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
fnstsw(uint16_t *val)
{
	__asm volatile (
		"fnstsw	%[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
clts(void)
{
	__asm volatile ("clts" ::: "memory");
}

void	stts(void);

static inline void
x86_stmxcsr(uint32_t *val)
{
	__asm volatile (
		"stmxcsr %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
x86_ldmxcsr(uint32_t *val)
{
	__asm volatile (
		"ldmxcsr %[val]"
		:
		: [val] "m" (*val)
	);
}

void	fldummy(void);

static inline uint64_t
rdxcr(uint32_t xcr)
{
	uint32_t low, high;

	__asm volatile (
		"xgetbv"
		: "=a" (low), "=d" (high)
		: "c" (xcr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline void
wrxcr(uint32_t xcr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"xsetbv"
		:
		: "a" (low), "d" (high), "c" (xcr)
	);
}
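
/*
 * Example (illustrative only): XCR0 is extended control register 0; its
 * bits select which state components XSAVE/XRSTOR manage.  The XCR0_* bit
 * names are assumed from specialreg.h.
 *
 *	uint64_t xcr0 = rdxcr(0);
 *
 *	wrxcr(0, xcr0 | XCR0_X87 | XCR0_SSE);
 */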

static inline void
fnsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fnsave	%[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
frstor(const void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"frstor	%[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
fxsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fxsave	%[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
fxrstor(const void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"fxrstor %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
xsave(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsave	%[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xsaveopt(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsaveopt %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xrstor(const void *addr, uint64_t mask)
{
	const uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xrstor %[area]"
		:
		: [area] "m" (*area), "a" (low), "d" (high)
		: "memory"
	);
}
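
/*
 * Example (illustrative only): saving and restoring the legacy x87+SSE
 * state into a suitably aligned save area, with the component mask built
 * from XCR0_* bits assumed from specialreg.h.
 *
 *	xsave(area, XCR0_X87 | XCR0_SSE);
 *	...
 *	xrstor(area, XCR0_X87 | XCR0_SSE);
 */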

#ifdef __x86_64__
static inline void
fxsave64(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fxsave64	%[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
fxrstor64(const void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"fxrstor64 %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
xsave64(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsave64	%[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xsaveopt64(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsaveopt64 %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xrstor64(const void *addr, uint64_t mask)
{
	const uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xrstor64 %[area]"
		:
		: [area] "m" (*area), "a" (low), "d" (high)
		: "memory"
	);
}
#endif

/* -------------------------------------------------------------------------- */

#ifdef XENPV
void x86_disable_intr(void);
void x86_enable_intr(void);
#else
static inline void
x86_disable_intr(void)
{
	__asm volatile ("cli" ::: "memory");
}

static inline void
x86_enable_intr(void)
{
	__asm volatile ("sti" ::: "memory");
}
#endif /* XENPV */

/* Use read_psl, write_psl when saving and restoring interrupt state. */
u_long	x86_read_psl(void);
void	x86_write_psl(u_long);
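
/*
 * Example (illustrative only): the usual save/disable/restore pattern
 * around a short critical section.
 *
 *	u_long psl = x86_read_psl();
 *
 *	x86_disable_intr();
 *	... critical section ...
 *	x86_write_psl(psl);
 */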

/* Use read_flags, write_flags to adjust other members of %eflags. */
u_long	x86_read_flags(void);
void	x86_write_flags(u_long);

void	x86_reset(void);

/* -------------------------------------------------------------------------- */

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */
#define	OPTERON_MSR_PASSCODE	0x9c5a203aU

static inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline uint64_t
rdmsr_locked(u_int msr)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr), "D" (pass)
	);

	return (low | ((uint64_t)high << 32));
}

int	rdmsr_safe(u_int, uint64_t *);

static inline void
wrmsr(u_int msr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr)
		: "memory"
	);
}

static inline void
wrmsr_locked(u_int msr, uint64_t val)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr), "D" (pass)
		: "memory"
	);
}
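
/*
 * Example (illustrative only): a read-modify-write of a model-specific
 * register, here EFER, with the MSR_EFER and EFER_NXE definitions assumed
 * from specialreg.h.
 *
 *	uint64_t efer = rdmsr(MSR_EFER);
 *
 *	wrmsr(MSR_EFER, efer | EFER_NXE);
 */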

#endif /* _KERNEL */

#endif /* !_X86_CPUFUNC_H_ */