/*	$NetBSD: cpufunc.h,v 1.42 2020/10/24 07:14:29 mgorny Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_CPUFUNC_H_
#define	_X86_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif

static inline void
x86_pause(void)
{
	__asm volatile ("pause");
}

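/*
 * PAUSE is the spin-wait hint: it backs off the pipeline inside a busy
 * loop, saving power and avoiding memory-order mis-speculation.  A minimal
 * sketch, assuming a hypothetical "volatile int *lock" being polled:
 *
 *	while (*lock != 0)
 *		x86_pause();
 */
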
void	x86_lfence(void);
void	x86_sfence(void);
void	x86_mfence(void);
void	x86_flush(void);
void	x86_hlt(void);
void	x86_stihlt(void);
void	tlbflush(void);
void	tlbflushg(void);
void	invlpg(vaddr_t);
void	wbinvd(void);
void	breakpoint(void);

#define INVPCID_ADDRESS		0
#define INVPCID_CONTEXT		1
#define INVPCID_ALL		2
#define INVPCID_ALL_NONGLOBAL	3

static inline void
invpcid(register_t op, uint64_t pcid, vaddr_t va)
{
	struct {
		uint64_t pcid;
		uint64_t addr;
	} desc = {
		.pcid = pcid,
		.addr = va
	};

	__asm volatile (
		"invpcid %[desc],%[op]"
		:
		: [desc] "m" (desc), [op] "r" (op)
		: "memory"
	);
}
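
/*
 * INVPCID invalidates TLB entries by PCID.  INVPCID_ADDRESS drops the
 * translation for one address in one PCID, INVPCID_CONTEXT drops a whole
 * PCID, INVPCID_ALL drops everything including global entries, and
 * INVPCID_ALL_NONGLOBAL leaves global entries intact.  A minimal sketch,
 * assuming hypothetical "pcid" and "va" values from the caller:
 *
 *	invpcid(INVPCID_ADDRESS, pcid, va);
 */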

extern uint64_t (*rdtsc)(void);

#define _SERIALIZE_lfence	__asm volatile ("lfence")
#define _SERIALIZE_mfence	__asm volatile ("mfence")
#define _SERIALIZE_cpuid	__asm volatile ("xor %%eax, %%eax;cpuid" ::: \
	    "eax", "ebx", "ecx", "edx");

#define RDTSCFUNC(fence)			\
static inline uint64_t				\
rdtsc_##fence(void)				\
{						\
	uint32_t low, high;			\
						\
	_SERIALIZE_##fence;			\
	__asm volatile (			\
		"rdtsc"				\
		: "=a" (low), "=d" (high)	\
		:				\
	);					\
						\
	return (low | ((uint64_t)high << 32));	\
}

RDTSCFUNC(lfence)
RDTSCFUNC(mfence)
RDTSCFUNC(cpuid)

#undef _SERIALIZE_lfence
#undef _SERIALIZE_mfence
#undef _SERIALIZE_cpuid
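
/*
 * The rdtsc function pointer is expected to be set at boot to whichever
 * rdtsc_* variant matches the CPU's cheapest serializing fence, so that
 * RDTSC cannot be speculated ahead of earlier instructions.  A minimal
 * sketch of timing a code region, assuming a hypothetical do_work():
 *
 *	uint64_t t0, t1;
 *
 *	t0 = rdtsc();
 *	do_work();
 *	t1 = rdtsc();
 *	printf("%ju cycles\n", (uintmax_t)(t1 - t0));
 */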

#ifndef XENPV
struct x86_hotpatch_source {
	uint8_t *saddr;
	uint8_t *eaddr;
};

struct x86_hotpatch_descriptor {
	uint8_t name;
	uint8_t nsrc;
	const struct x86_hotpatch_source *srcs[];
};

void	x86_hotpatch(uint8_t, uint8_t);
void	x86_patch(bool);
#endif

void	x86_monitor(const void *, uint32_t, uint32_t);
void	x86_mwait(uint32_t, uint32_t);
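
/*
 * x86_monitor()/x86_mwait() wrap MONITOR and MWAIT.  A minimal idle-wait
 * sketch, assuming a hypothetical "volatile int wakeup" flag that another
 * CPU writes; MONITOR arms the address range, MWAIT sleeps until the
 * range is written or an interrupt arrives, and the zero arguments
 * request no extensions and no hints:
 *
 *	x86_monitor(&wakeup, 0, 0);
 *	if (wakeup == 0)
 *		x86_mwait(0, 0);
 */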

static inline void
x86_cpuid2(uint32_t eax, uint32_t ecx, uint32_t *regs)
{
	uint32_t ebx, edx;

	__asm volatile (
		"cpuid"
		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		: "a" (eax), "c" (ecx)
	);

	regs[0] = eax;
	regs[1] = ebx;
	regs[2] = ecx;
	regs[3] = edx;
}
#define x86_cpuid(a,b)	x86_cpuid2((a), 0, (b))
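
/*
 * A minimal sketch of querying a CPUID leaf, here basic leaf 1; the regs
 * array follows the EAX/EBX/ECX/EDX order that x86_cpuid2() stores, and
 * CPUID2_AVX is assumed to come from <machine/specialreg.h>:
 *
 *	uint32_t descs[4];
 *
 *	x86_cpuid(1, descs);
 *	if (descs[2] & CPUID2_AVX) {
 *		... AVX is advertised ...
 *	}
 */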

/* -------------------------------------------------------------------------- */

void	lidt(struct region_descriptor *);
void	lldt(u_short);
void	ltr(u_short);

static inline uint16_t
x86_getss(void)
{
	uint16_t val;

	__asm volatile (
		"mov	%%ss,%[val]"
		: [val] "=r" (val)
		:
	);
	return val;
}

static inline void
setds(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%ds"
		:
		: [val] "r" (val)
	);
}

static inline void
setes(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%es"
		:
		: [val] "r" (val)
	);
}

static inline void
setfs(uint16_t val)
{
	__asm volatile (
		"mov	%[val],%%fs"
		:
		: [val] "r" (val)
	);
}

void	setusergs(int);

/* -------------------------------------------------------------------------- */

#define FUNC_CR(crnum)					\
	static inline void lcr##crnum(register_t val)	\
	{						\
		__asm volatile (			\
			"mov	%[val],%%cr" #crnum	\
			:				\
			: [val] "r" (val)		\
			: "memory"			\
		);					\
	}						\
	static inline register_t rcr##crnum(void)	\
	{						\
		register_t val;				\
		__asm volatile (			\
			"mov	%%cr" #crnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_CR(crnum)					\
	void lcr##crnum(register_t);			\
	register_t rcr##crnum(void);

#ifndef XENPV
FUNC_CR(0)
FUNC_CR(2)
FUNC_CR(3)
#else
PROTO_CR(0)
PROTO_CR(2)
PROTO_CR(3)
#endif

FUNC_CR(4)
FUNC_CR(8)
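
/*
 * FUNC_CR(n) expands to inline lcrN()/rcrN() accessors for control
 * register N; under XENPV, %cr0/%cr2/%cr3 cannot be touched directly, so
 * only prototypes are declared here and the implementations go through
 * the hypervisor.  A minimal read-modify-write sketch, assuming the
 * CR4_OSXSAVE bit from <machine/specialreg.h>:
 *
 *	lcr4(rcr4() | CR4_OSXSAVE);
 */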

/* -------------------------------------------------------------------------- */

#define FUNC_DR(drnum)					\
	static inline void ldr##drnum(register_t val)	\
	{						\
		__asm volatile (			\
			"mov	%[val],%%dr" #drnum	\
			:				\
			: [val] "r" (val)		\
		);					\
	}						\
	static inline register_t rdr##drnum(void)	\
	{						\
		register_t val;				\
		__asm volatile (			\
			"mov	%%dr" #drnum ",%[val]"	\
			: [val] "=r" (val)		\
			:				\
		);					\
		return val;				\
	}

#define PROTO_DR(drnum)					\
	register_t rdr##drnum(void);			\
	void ldr##drnum(register_t);

#ifndef XENPV
FUNC_DR(0)
FUNC_DR(1)
FUNC_DR(2)
FUNC_DR(3)
FUNC_DR(6)
FUNC_DR(7)
#else
PROTO_DR(0)
PROTO_DR(1)
PROTO_DR(2)
PROTO_DR(3)
PROTO_DR(6)
PROTO_DR(7)
#endif
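
/*
 * FUNC_DR(n) likewise expands to ldrN()/rdrN() accessors for the debug
 * registers: %dr0-%dr3 hold breakpoint addresses, %dr6 is the status
 * register and %dr7 the control register.  A minimal sketch of arming
 * hardware breakpoint 0 as a 1-byte execute breakpoint, assuming a
 * hypothetical code address "va" (DR7 bit 0 is the local-enable bit for
 * breakpoint 0):
 *
 *	ldr0(va);
 *	ldr7(rdr7() | 0x1);
 */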

/* -------------------------------------------------------------------------- */

union savefpu;

static inline void
fninit(void)
{
	__asm volatile ("fninit" ::: "memory");
}

static inline void
fnclex(void)
{
	__asm volatile ("fnclex");
}

static inline void
fnstcw(uint16_t *val)
{
	__asm volatile (
		"fnstcw	%[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
fnstsw(uint16_t *val)
{
	__asm volatile (
		"fnstsw	%[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
clts(void)
{
	__asm volatile ("clts" ::: "memory");
}

void	stts(void);

static inline void
x86_stmxcsr(uint32_t *val)
{
	__asm volatile (
		"stmxcsr %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
x86_ldmxcsr(uint32_t *val)
{
	__asm volatile (
		"ldmxcsr %[val]"
		:
		: [val] "m" (*val)
	);
}
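
/*
 * x86_stmxcsr()/x86_ldmxcsr() read and write the SSE control/status
 * register.  A minimal sketch that masks all SSE floating-point
 * exceptions (MXCSR bits 7-12 are the exception mask bits):
 *
 *	uint32_t mxcsr;
 *
 *	x86_stmxcsr(&mxcsr);
 *	mxcsr |= 0x1f80;
 *	x86_ldmxcsr(&mxcsr);
 */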

void	fldummy(void);

static inline uint64_t
rdxcr(uint32_t xcr)
{
	uint32_t low, high;

	__asm volatile (
		"xgetbv"
		: "=a" (low), "=d" (high)
		: "c" (xcr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline void
wrxcr(uint32_t xcr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"xsetbv"
		:
		: "a" (low), "d" (high), "c" (xcr)
	);
}
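
/*
 * rdxcr()/wrxcr() wrap XGETBV/XSETBV; CR4_OSXSAVE must be set before they
 * can be executed, and XCR0 selects the state components that the XSAVE
 * family manages.  A minimal sketch of enabling x87, SSE and AVX state,
 * assuming the XCR0_* bits from <machine/specialreg.h>:
 *
 *	wrxcr(0, rdxcr(0) | XCR0_X87 | XCR0_SSE | XCR0_YMM_Hi128);
 */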

static inline void
fnsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fnsave	%[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
frstor(const void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"frstor	%[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
fxsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fxsave	%[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
fxrstor(const void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"fxrstor %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
xsave(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsave	%[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xsaveopt(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsaveopt %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xrstor(const void *addr, uint64_t mask)
{
	const uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xrstor %[area]"
		:
		: [area] "m" (*area), "a" (low), "d" (high)
		: "memory"
	);
}
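
/*
 * fnsave/frstor handle the legacy x87 save area, fxsave/fxrstor the
 * 512-byte FXSAVE area, and xsave/xsaveopt/xrstor an XSAVE area whose
 * size and layout depend on the components enabled in XCR0; the mask
 * argument selects which components to save or restore.  The *64
 * variants below use the REX.W forms, which record the FPU instruction
 * and operand pointers as full 64-bit addresses.  A minimal sketch,
 * assuming a 64-byte-aligned buffer "area" of sufficient size and the
 * XCR0_* bits from <machine/specialreg.h>:
 *
 *	xsave(area, XCR0_X87 | XCR0_SSE);
 *	...
 *	xrstor(area, XCR0_X87 | XCR0_SSE);
 */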

#ifdef __x86_64__
static inline void
fxsave64(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fxsave64	%[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
fxrstor64(const void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"fxrstor64 %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
xsave64(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsave64	%[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xsaveopt64(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsaveopt64 %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xrstor64(const void *addr, uint64_t mask)
{
	const uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xrstor64 %[area]"
		:
		: [area] "m" (*area), "a" (low), "d" (high)
		: "memory"
	);
}
#endif

/* -------------------------------------------------------------------------- */

#ifdef XENPV
void x86_disable_intr(void);
void x86_enable_intr(void);
#else
static inline void
x86_disable_intr(void)
{
	__asm volatile ("cli" ::: "memory");
}

static inline void
x86_enable_intr(void)
{
	__asm volatile ("sti" ::: "memory");
}
#endif /* XENPV */

/* Use read_psl, write_psl when saving and restoring interrupt state. */
u_long	x86_read_psl(void);
void	x86_write_psl(u_long);
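
/*
 * A minimal save/disable/restore sketch around a short critical section
 * (kernel code normally reaches these through higher-level interrupt
 * primitives rather than calling them directly):
 *
 *	u_long psl;
 *
 *	psl = x86_read_psl();
 *	x86_disable_intr();
 *	... critical section ...
 *	x86_write_psl(psl);
 */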

/* Use read_flags, write_flags to adjust other members of %eflags. */
u_long	x86_read_flags(void);
void	x86_write_flags(u_long);

void	x86_reset(void);

/* -------------------------------------------------------------------------- */

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */
#define	OPTERON_MSR_PASSCODE	0x9c5a203aU

static inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline uint64_t
rdmsr_locked(u_int msr)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr), "D" (pass)
	);

	return (low | ((uint64_t)high << 32));
}

int	rdmsr_safe(u_int, uint64_t *);

static inline void
wrmsr(u_int msr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr)
		: "memory"
	);
}

static inline void
wrmsr_locked(u_int msr, uint64_t val)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr), "D" (pass)
		: "memory"
	);
}
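
/*
 * A minimal read-modify-write sketch for an architectural MSR, assuming
 * the MSR_EFER and EFER_NXE definitions from <machine/specialreg.h>:
 *
 *	uint64_t efer;
 *
 *	efer = rdmsr(MSR_EFER);
 *	wrmsr(MSR_EFER, efer | EFER_NXE);
 *
 * The *_locked variants additionally pass OPTERON_MSR_PASSCODE in %edi
 * and are only for the protected AMD MSRs mentioned above.
 */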

#endif /* _KERNEL */

#endif /* !_X86_CPUFUNC_H_ */