/*	$NetBSD: cpufunc.h,v 1.32 2019/05/30 21:40:40 christos Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_CPUFUNC_H_
#define	_X86_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif

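/* Spin-wait loop hint: PAUSE tells the CPU we are in a busy-wait loop. */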
static inline void
x86_pause(void)
{
	__asm volatile ("pause");
}

void	x86_lfence(void);
void	x86_sfence(void);
void	x86_mfence(void);
void	x86_flush(void);
void	x86_hlt(void);
void	x86_stihlt(void);
void	tlbflush(void);
void	tlbflushg(void);
void	invlpg(vaddr_t);
void	wbinvd(void);
void	breakpoint(void);

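/*
 * INVPCID invalidates TLB entries tagged with a given PCID. It takes a
 * 16-byte memory descriptor (the PCID followed by a linear address) and an
 * operation type selecting the scope of the invalidation:
 */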
#define INVPCID_ADDRESS		0
#define INVPCID_CONTEXT		1
#define INVPCID_ALL		2
#define INVPCID_ALL_NONGLOBAL	3

static inline void
invpcid(register_t op, uint64_t pcid, vaddr_t va)
{
	struct {
		uint64_t pcid;
		uint64_t addr;
	} desc = {
		.pcid = pcid,
		.addr = va
	};

	__asm volatile (
		"invpcid %[desc],%[op]"
		:
		: [desc] "m" (desc), [op] "r" (op)
		: "memory"
	);
}

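/* Read the 64-bit time-stamp counter; RDTSC returns it in %edx:%eax. */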
static inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm volatile (
		"rdtsc"
		: "=a" (low), "=d" (high)
		:
	);

	return (low | ((uint64_t)high << 32));
}

#ifndef XEN
void	x86_hotpatch(uint32_t, const uint8_t *, size_t);
void	x86_patch_window_open(u_long *, u_long *);
void	x86_patch_window_close(u_long, u_long);
void	x86_patch(bool);
#endif

void	x86_monitor(const void *, uint32_t, uint32_t);
void	x86_mwait(uint32_t, uint32_t);
/* x86_cpuid2() writes four 32bit values, %eax, %ebx, %ecx and %edx */
#define	x86_cpuid(a,b)	x86_cpuid2((a),0,(b))
void	x86_cpuid2(uint32_t, uint32_t, uint32_t *);

/* -------------------------------------------------------------------------- */

void	lidt(struct region_descriptor *);
void	lldt(u_short);
void	ltr(u_short);

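/* Segment register accessors: read %ss, load %ds/%es/%fs. */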
static inline uint16_t
x86_getss(void)
{
	uint16_t val;

	__asm volatile (
		"mov	%%ss,%[val]"
		: [val] "=r" (val)
		:
	);
	return val;
}

static inline void
setds(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%ds"
		:
		: [val] "r" (val)
	);
}

static inline void
setes(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%es"
		:
		: [val] "r" (val)
	);
}

static inline void
setfs(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%fs"
		:
		: [val] "r" (val)
	);
}

void	setusergs(int);

/* -------------------------------------------------------------------------- */

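/*
 * FUNC_CR(n) generates the lcr<n>()/rcr<n>() inlines that load and read
 * control register %cr<n>. Under XENPV the CR0/CR2/CR3 accessors are
 * provided out of line, since a paravirtualized guest must go through the
 * hypervisor for them.
 */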
#define FUNC_CR(crnum)					\
	static inline void lcr##crnum(register_t val)	\
	{						\
		__asm volatile (			\
			"mov	%[val],%%cr" #crnum	\
			:				\
			: [val] "r" (val)		\
		);					\
	}						\
	static inline register_t rcr##crnum(void)	\
	{						\
		register_t val;				\
		__asm volatile (			\
			"mov	%%cr" #crnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_CR(crnum)					\
	void lcr##crnum(register_t);			\
	register_t rcr##crnum(void);

#ifndef XENPV
FUNC_CR(0)
FUNC_CR(2)
FUNC_CR(3)
#else
PROTO_CR(0)
PROTO_CR(2)
PROTO_CR(3)
#endif

FUNC_CR(4)
FUNC_CR(8)

/* -------------------------------------------------------------------------- */

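/*
 * FUNC_DR(n) likewise generates ldr<n>()/rdr<n>() inlines for debug
 * register %dr<n>; XENPV kernels again get out-of-line versions.
 */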
#define FUNC_DR(drnum)					\
	static inline void ldr##drnum(register_t val)	\
	{						\
		__asm volatile (			\
			"mov	%[val],%%dr" #drnum	\
			:				\
			: [val] "r" (val)		\
		);					\
	}						\
	static inline register_t rdr##drnum(void)	\
	{						\
		register_t val;				\
		__asm volatile (			\
			"mov	%%dr" #drnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_DR(drnum)					\
	register_t rdr##drnum(void);			\
	void ldr##drnum(register_t);

#ifndef XENPV
FUNC_DR(0)
FUNC_DR(1)
FUNC_DR(2)
FUNC_DR(3)
FUNC_DR(6)
FUNC_DR(7)
#else
PROTO_DR(0)
PROTO_DR(1)
PROTO_DR(2)
PROTO_DR(3)
PROTO_DR(6)
PROTO_DR(7)
#endif

/* -------------------------------------------------------------------------- */

union savefpu;

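/* Initialize the x87 FPU, without checking for pending unmasked exceptions. */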
static inline void
fninit(void)
{
	__asm volatile ("fninit");
}

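/* Clear the x87 exception flags, without checking for pending exceptions. */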
static inline void
fnclex(void)
{
	__asm volatile ("fnclex");
}

void	fnsave(union savefpu *);
void	fnstcw(uint16_t *);
uint16_t fngetsw(void);
void	fnstsw(uint16_t *);
void	frstor(const union savefpu *);

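/* Clear CR0.TS so that FPU/SIMD instructions no longer trap with #NM. */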
static inline void
clts(void)
{
	__asm volatile ("clts");
}

void	stts(void);
void	fxsave(union savefpu *);
void	fxrstor(const union savefpu *);

void	x86_ldmxcsr(const uint32_t *);
void	x86_stmxcsr(uint32_t *);
void	fldummy(void);

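/* Read the extended control register selected by 'xcr' (XGETBV). */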
static inline uint64_t
rdxcr(uint32_t xcr)
{
	uint32_t low, high;

	__asm volatile (
		"xgetbv"
		: "=a" (low), "=d" (high)
		: "c" (xcr)
	);

	return (low | ((uint64_t)high << 32));
}

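/* Write 'val' into the extended control register selected by 'xcr' (XSETBV). */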
static inline void
wrxcr(uint32_t xcr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"xsetbv"
		:
		: "a" (low), "d" (high), "c" (xcr)
	);
}

void	xrstor(const union savefpu *, uint64_t);
void	xsave(union savefpu *, uint64_t);
void	xsaveopt(union savefpu *, uint64_t);

/* -------------------------------------------------------------------------- */

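/*
 * Interrupt control. Native kernels use CLI/STI directly; XENPV kernels
 * use out-of-line versions, since a PV guest does not control the real
 * interrupt flag.
 */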
#ifdef XENPV
void x86_disable_intr(void);
void x86_enable_intr(void);
#else
static inline void
x86_disable_intr(void)
{
	__asm volatile ("cli");
}

static inline void
x86_enable_intr(void)
{
	__asm volatile ("sti");
}
#endif /* XENPV */

/* Use read_psl, write_psl when saving and restoring interrupt state. */
u_long	x86_read_psl(void);
void	x86_write_psl(u_long);

/* Use read_flags, write_flags to adjust other members of %eflags. */
u_long	x86_read_flags(void);
void	x86_write_flags(u_long);

void	x86_reset(void);

/* -------------------------------------------------------------------------- */

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */
#define	OPTERON_MSR_PASSCODE	0x9c5a203aU

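/* Read the MSR selected by 'msr'; RDMSR returns the value in %edx:%eax. */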
static inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr)
	);

	return (low | ((uint64_t)high << 32));
}

uint64_t	rdmsr_locked(u_int);
int		rdmsr_safe(u_int, uint64_t *);

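/* Write 'val' to the MSR selected by 'msr'; WRMSR takes the value in %edx:%eax. */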
static inline void
wrmsr(u_int msr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr)
	);
}

void		wrmsr_locked(u_int, uint64_t);

#endif /* _KERNEL */

#endif /* !_X86_CPUFUNC_H_ */