/*	$OpenBSD: cpufunc.h,v 1.43 2024/11/08 12:08:22 bluhm Exp $	*/
/*	$NetBSD: cpufunc.h,v 1.3 2003/05/08 10:27:43 fvdl Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

/*
 * Functions to provide access to i386-specific instructions.
 */

#include <sys/types.h>

#include <machine/specialreg.h>

#if defined(_KERNEL) && !defined (_STANDALONE)

static __inline void
invlpg(u_int64_t addr)
{
	__asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

static __inline void
sidt(void *p)
{
	__asm volatile("sidt (%0)" : : "r" (p) : "memory");
}

static __inline void
lidt(void *p)
{
	__asm volatile("lidt (%0)" : : "r" (p) : "memory");
}

static __inline void
sgdt(void *p)
{
	__asm volatile("sgdt (%0)" : : "r" (p) : "memory");
}

static __inline void
bare_lgdt(struct region_descriptor *p)
{
	__asm volatile("lgdt (%0)" : : "r" (p) : "memory");
}

static __inline void
sldt(u_short *sel)
{
	__asm volatile("sldt (%0)" : : "r" (sel) : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * The upper 32 bits are reserved anyway, so just keep this at 32 bits.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * Same as for cr0. Don't touch upper 32 bits.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int64_t val64;
	__asm volatile("movq %%cr4,%0" : "=r" (val64));
	return (u_int) val64;
}

static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static inline void
invpcid(uint64_t type, paddr_t pcid, paddr_t addr)
{
	uint64_t desc[2] = { pcid, addr };
	asm volatile("invpcid %0,%1" : : "m"(desc[0]), "r"(type));
}
#define INVPCID_ADDR		0
#define INVPCID_PCID		1
#define INVPCID_ALL		2
#define INVPCID_NON_GLOBAL	3
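
/*
 * Usage sketch (illustrative, not part of the original header): flush a
 * single mapping for one PCID, falling back to invlpg() when the CPU
 * lacks INVPCID.  `have_invpcid', `pcid' and `va' are hypothetical.
 *
 *	if (have_invpcid)
 *		invpcid(INVPCID_ADDR, pcid, va);
 *	else
 *		invlpg(va);
 */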

#ifdef notyet
void	setidt(int idx, /*XXX*/caddr_t func, int typ, int dpl);
#endif


/* XXXX ought to be in psl.h with spl() functions */

static __inline u_long
read_rflags(void)
{
	u_long	ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

static __inline void
intr_enable(void)
{
	__asm volatile("sti");
}

static __inline u_long
intr_disable(void)
{
	u_long ef;

	ef = read_rflags();
	__asm volatile("cli");
	return (ef);
}

static __inline void
intr_restore(u_long ef)
{
	write_rflags(ef);
}
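
/*
 * Typical save/restore pattern (illustrative sketch only): keep the
 * previous interrupt state so nested critical sections do not
 * accidentally re-enable interrupts early.
 *
 *	u_long s;
 *
 *	s = intr_disable();
 *	... touch state that must not be interrupted ...
 *	intr_restore(s);
 */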

static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline int
rdpkru(u_int ecx)
{
	uint32_t edx, pkru;
	asm volatile("rdpkru " : "=a" (pkru), "=d" (edx) : "c" (ecx));
	return pkru;
}

static __inline void
wrpkru(u_int ecx, uint32_t pkru)
{
	uint32_t edx = 0;
	asm volatile("wrpkru" : : "a" (pkru), "c" (ecx), "d" (edx));
}
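
/*
 * PKRU sketch (illustrative; key number 1 is hypothetical): each key
 * owns two bits in PKRU, access-disable at bit 2*key and write-disable
 * at bit 2*key+1.  ecx must be 0 for rdpkru/wrpkru.
 *
 *	uint32_t pkru;
 *
 *	pkru = rdpkru(0);
 *	pkru |= 1U << (2 * 1);		(set AD for key 1)
 *	wrpkru(0, pkru);
 */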

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}
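
/*
 * Read-modify-write sketch for an MSR (illustrative only; MSR_EFER and
 * EFER_NXE are assumed to come from <machine/specialreg.h>).
 *
 *	uint64_t efer;
 *
 *	efer = rdmsr(MSR_EFER);
 *	wrmsr(MSR_EFER, efer | EFER_NXE);
 */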

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 *
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */

#define	OPTERON_MSR_PASSCODE	0x9c5a203a

static __inline u_int64_t
rdmsr_locked(u_int msr, u_int code)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr"
	    : "=d" (hi), "=a" (lo)
	    : "c" (msr), "D" (code));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr_locked(u_int msr, u_int code, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr), "D" (code));
}
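
/*
 * Passcode sketch (illustrative; MSR_HYPOTHETICAL stands in for one of
 * the undocumented MSRs): the passcode travels in %edi via the "D"
 * constraint above, alongside the usual MSR number in %ecx.
 *
 *	uint64_t val;
 *
 *	val = rdmsr_locked(MSR_HYPOTHETICAL, OPTERON_MSR_PASSCODE);
 *	wrmsr_locked(MSR_HYPOTHETICAL, OPTERON_MSR_PASSCODE, val | 1);
 */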

static __inline void
wbinvd(void)
{
	__asm volatile("wbinvd" : : : "memory");
}

#ifdef MULTIPROCESSOR
int wbinvd_on_all_cpus(void);
void wbinvd_on_all_cpus_acked(void);
#else
static inline int
wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}

static inline int
wbinvd_on_all_cpus_acked(void)
{
	wbinvd();
	return 0;
}
#endif /* MULTIPROCESSOR */

static __inline void
clflush(u_int64_t addr)
{
	__asm volatile("clflush %0" : "+m" (*(volatile char *)addr));
}

static __inline void
mfence(void)
{
	__asm volatile("mfence" : : : "memory");
}
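
/*
 * Flush-then-fence sketch (illustrative): write back each cache line of
 * a buffer and order the flushes with mfence().  `buf' and `len' are
 * hypothetical, and the 64-byte line size should really come from CPUID.
 *
 *	u_int64_t p;
 *
 *	for (p = (u_int64_t)buf; p < (u_int64_t)buf + len; p += 64)
 *		clflush(p);
 *	mfence();
 */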

static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline u_int64_t
rdtscp(void)
{
	uint32_t hi, lo;

	__asm volatile("rdtscp" : "=d" (hi), "=a" (lo) : : "ecx");
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline u_int64_t
rdtsc_lfence(void)
{
	uint32_t hi, lo;

	__asm volatile("lfence; rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}
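
/*
 * Timing sketch (illustrative): the lfence keeps earlier instructions
 * from drifting past the timestamp read, so simple deltas are less
 * noisy.  The result is in TSC ticks, not nanoseconds.
 *
 *	uint64_t t0, t1;
 *
 *	t0 = rdtsc_lfence();
 *	... code being measured ...
 *	t1 = rdtsc_lfence();
 *	printf("%llu ticks\n", t1 - t0);
 */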

static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
monitor(const volatile void *addr, u_long extensions, u_int hints)
{

	__asm volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
mwait(u_long extensions, u_int hints)
{

	__asm volatile(
		"	mwait			;"
		"	mov	$8,%%rcx	;"
		"	.align	16,0x90		;"
		"3:	call	5f		;"
		"4:	pause			;"
		"	lfence			;"
		"	call	4b		;"
		"	.align	16,0xcc		;"
		"5:	call	7f		;"
		"6:	pause			;"
		"	lfence			;"
		"	call	6b		;"
		"	.align	16,0xcc		;"
		"7:	loop	3b		;"
		"	add	$(16*8),%%rsp"
	    : "+c" (extensions) : "a" (hints));
}

static __inline void
xsetbv(uint32_t reg, uint64_t mask)
{
	uint32_t lo, hi;

	lo = mask;
	hi = mask >> 32;
	__asm volatile("xsetbv" :: "c" (reg), "a" (lo), "d" (hi) : "memory");
}

static __inline uint64_t
xgetbv(uint32_t reg)
{
	uint32_t lo, hi;

	__asm volatile("xgetbv" : "=a" (lo), "=d" (hi) : "c" (reg));

	return (((uint64_t)hi << 32) | (uint64_t)lo);
}
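
/*
 * XCR0 sketch (illustrative): enable x87/SSE/AVX state, assuming
 * CR4_OSXSAVE is already set, CPUID reported AVX, and the XFEATURE_*
 * bits are assumed to come from <machine/specialreg.h>.
 *
 *	uint64_t xcr0;
 *
 *	xcr0 = xgetbv(0);
 *	xsetbv(0, xcr0 | XFEATURE_X87 | XFEATURE_SSE | XFEATURE_AVX);
 */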

static __inline void
stgi(void)
{
	__asm volatile("stgi");
}

static __inline void
clgi(void)
{
	__asm volatile("clgi");
}

/* Break into DDB. */
static __inline void
breakpoint(void)
{
	__asm volatile("int $3");
}

void amd64_errata(struct cpu_info *);
void cpu_ucode_setup(void);
void cpu_ucode_apply(struct cpu_info *);

struct cpu_info_full;
void cpu_enter_pages(struct cpu_info_full *);

int rdmsr_safe(u_int msr, uint64_t *);

#endif /* _KERNEL */

#endif /* !_MACHINE_CPUFUNC_H_ */