/*	$OpenBSD: cpufunc.h,v 1.41 2024/05/14 01:46:24 guenther Exp $	*/
/*	$NetBSD: cpufunc.h,v 1.3 2003/05/08 10:27:43 fvdl Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

/*
 * Functions to provide access to i386-specific instructions.
 */

#include <sys/types.h>

#include <machine/specialreg.h>

#if defined(_KERNEL) && !defined (_STANDALONE)

/*
 * invlpg: invalidate any TLB entry for the page containing addr.
 */
static __inline void
invlpg(u_int64_t addr)
{
	__asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

/*
 * sidt: store the IDT register (base/limit descriptor) into *p.
 */
static __inline void
sidt(void *p)
{
	__asm volatile("sidt (%0)" : : "r" (p) : "memory");
}

/*
 * lidt: load the IDT register from the descriptor at *p.
 */
static __inline void
lidt(void *p)
{
	__asm volatile("lidt (%0)" : : "r" (p) : "memory");
}

/*
 * sgdt: store the GDT register (base/limit descriptor) into *p.
 */
static __inline void
sgdt(void *p)
{
	__asm volatile("sgdt (%0)" : : "r" (p) : "memory");
}

/*
 * bare_lgdt: load the GDT register from *p.  "Bare" because this only
 * executes lgdt — reloading segment registers afterwards, if needed,
 * is the caller's responsibility.
 */
static __inline void
bare_lgdt(struct region_descriptor *p)
{
	__asm volatile("lgdt (%0)" : : "r" (p) : "memory");
}

/*
 * sldt: store the current LDT selector into *sel.
 */
static __inline void
sldt(u_short *sel)
{
	__asm volatile("sldt (%0)" : : "r" (sel) : "memory");
}

/*
 * lldt: load the LDT register with the given selector.
 */
static __inline void
lldt(u_short sel)
{
	__asm volatile("lldt %0" : : "r" (sel));
}

/*
 * ltr: load the task register with the given TSS selector.
 */
static __inline void
ltr(u_short sel)
{
	__asm volatile("ltr %0" : : "r" (sel));
}

94 static __inline void
lcr8(u_int val)95 lcr8(u_int val)
96 {
97 u_int64_t val64 = val;
98 __asm volatile("movq %0,%%cr8" : : "r" (val64));
99 }
100
101 /*
102 * Upper 32 bits are reserved anyway, so just keep this 32bits.
103 */
104 static __inline void
lcr0(u_int val)105 lcr0(u_int val)
106 {
107 u_int64_t val64 = val;
108 __asm volatile("movq %0,%%cr0" : : "r" (val64));
109 }
110
111 static __inline u_int
rcr0(void)112 rcr0(void)
113 {
114 u_int64_t val64;
115 u_int val;
116 __asm volatile("movq %%cr0,%0" : "=r" (val64));
117 val = val64;
118 return val;
119 }
120
/*
 * rcr2: read %cr2 (the faulting address after a page fault).
 */
static __inline u_int64_t
rcr2(void)
{
	u_int64_t cr2;

	__asm volatile("movq %%cr2,%0" : "=r" (cr2));
	return (cr2);
}

/*
 * lcr3: load %cr3 (top-level page-table base); all 64 bits are used.
 */
static __inline void
lcr3(u_int64_t val)
{
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

/*
 * rcr3: read %cr3 (top-level page-table base).
 */
static __inline u_int64_t
rcr3(void)
{
	u_int64_t cr3;

	__asm volatile("movq %%cr3,%0" : "=r" (cr3));
	return (cr3);
}

143 /*
144 * Same as for cr0. Don't touch upper 32 bits.
145 */
146 static __inline void
lcr4(u_int val)147 lcr4(u_int val)
148 {
149 u_int64_t val64 = val;
150
151 __asm volatile("movq %0,%%cr4" : : "r" (val64));
152 }
153
154 static __inline u_int
rcr4(void)155 rcr4(void)
156 {
157 u_int64_t val64;
158 __asm volatile("movq %%cr4,%0" : "=r" (val64));
159 return (u_int) val64;
160 }
161
/*
 * tlbflush: reload %cr3 with its current value, which flushes all
 * non-global TLB entries.
 */
static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

/*
 * invpcid: invalidate TLB entries by PCID and/or address according to
 * `type` (one of the INVPCID_* values below).  The instruction takes a
 * 128-bit memory descriptor { pcid, addr } plus the type in a register.
 */
static inline void
invpcid(uint64_t type, paddr_t pcid, paddr_t addr)
{
	uint64_t desc[2] = { pcid, addr };
	asm volatile("invpcid %0,%1" : : "m"(desc[0]), "r"(type));
}
/* invpcid invalidation types */
#define INVPCID_ADDR		0	/* one address within one PCID */
#define INVPCID_PCID		1	/* all entries for one PCID */
#define INVPCID_ALL		2	/* all entries, incl. globals */
#define INVPCID_NON_GLOBAL	3	/* all entries except globals */

#ifdef notyet
void setidt(int idx, /*XXX*/caddr_t func, int typ, int dpl);
#endif


/* XXXX ought to be in psl.h with spl() functions */

/*
 * read_rflags: return the current %rflags (pushfq/popq pair).
 */
static __inline u_long
read_rflags(void)
{
	u_long ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

/*
 * write_rflags: load %rflags from ef (pushq/popfq pair).
 */
static __inline void
write_rflags(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

/*
 * intr_enable: enable maskable interrupts (set IF).
 */
static __inline void
intr_enable(void)
{
	__asm volatile("sti");
}

209 static __inline u_long
intr_disable(void)210 intr_disable(void)
211 {
212 u_long ef;
213
214 ef = read_rflags();
215 __asm volatile("cli");
216 return (ef);
217 }
218
/*
 * intr_restore: restore the interrupt state previously saved by
 * intr_disable().
 */
static __inline void
intr_restore(u_long ef)
{
	write_rflags(ef);
}

225 static __inline u_int64_t
rdmsr(u_int msr)226 rdmsr(u_int msr)
227 {
228 uint32_t hi, lo;
229 __asm volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
230 return (((uint64_t)hi << 32) | (uint64_t) lo);
231 }
232
/*
 * rdpkru: read the PKRU (protection-key rights) register.  The
 * instruction requires %ecx == 0; the caller passes that through.
 * %edx output is required by the instruction but discarded.
 */
static __inline int
rdpkru(u_int ecx)
{
	uint32_t edx, pkru;
	asm volatile("rdpkru " : "=a" (pkru), "=d" (edx) : "c" (ecx));
	return pkru;
}

/*
 * wrpkru: write the PKRU register.  The instruction requires
 * %ecx == 0 (caller-supplied) and %edx == 0.
 */
static __inline void
wrpkru(u_int ecx, uint32_t pkru)
{
	uint32_t edx = 0;
	asm volatile("wrpkru" : : "a" (pkru), "c" (ecx), "d" (edx));
}

/*
 * wrmsr: write the 64-bit value to model-specific register `msr`
 * (value supplied split across %edx:%eax).
 */
static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 *
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */

#define	OPTERON_MSR_PASSCODE	0x9c5a203a

263 static __inline u_int64_t
rdmsr_locked(u_int msr,u_int code)264 rdmsr_locked(u_int msr, u_int code)
265 {
266 uint32_t hi, lo;
267 __asm volatile("rdmsr"
268 : "=d" (hi), "=a" (lo)
269 : "c" (msr), "D" (code));
270 return (((uint64_t)hi << 32) | (uint64_t) lo);
271 }
272
/*
 * wrmsr_locked: like wrmsr(), but with the AMD 'passcode' supplied in
 * %edi for the protected/undocumented MSRs.
 */
static __inline void
wrmsr_locked(u_int msr, u_int code, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr), "D" (code));
}

/*
 * wbinvd: write back and invalidate this CPU's caches.
 */
static __inline void
wbinvd(void)
{
	__asm volatile("wbinvd" : : : "memory");
}

#ifdef MULTIPROCESSOR
/* MP kernels: prototype only; the implementation lives elsewhere. */
int wbinvd_on_all_cpus(void);
#else
/*
 * Uniprocessor: flushing "all" CPUs is just this one; always succeeds.
 */
static inline int
wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}
#endif

/*
 * clflush: flush the cache line containing addr.  The "+m" operand
 * makes the flushed memory an explicit dependency for the compiler.
 */
static __inline void
clflush(u_int64_t addr)
{
	__asm volatile("clflush %0" : "+m" (*(volatile char *)addr));
}

/*
 * mfence: full load/store memory fence.
 */
static __inline void
mfence(void)
{
	__asm volatile("mfence" : : : "memory");
}

/*
 * rdtsc: read the time-stamp counter (unserialized; may be executed
 * out of order — see rdtsc_lfence() for an ordered variant).
 */
static __inline u_int64_t
rdtsc(void)
{
	uint32_t high, low;

	__asm volatile("rdtsc" : "=d" (high), "=a" (low));
	return ((uint64_t)high << 32 | low);
}

/*
 * rdtscp: read the time-stamp counter; the instruction also writes
 * TSC_AUX into %ecx, hence the clobber.
 */
static __inline u_int64_t
rdtscp(void)
{
	uint32_t high, low;

	__asm volatile("rdtscp" : "=d" (high), "=a" (low) : : "ecx");
	return ((uint64_t)high << 32 | low);
}

/*
 * rdtsc_lfence: fence before reading the TSC so the timestamp is not
 * reordered ahead of earlier loads.
 */
static __inline u_int64_t
rdtsc_lfence(void)
{
	uint32_t high, low;

	__asm volatile("lfence; rdtsc" : "=d" (high), "=a" (low));
	return ((uint64_t)high << 32 | low);
}

336 static __inline u_int64_t
rdpmc(u_int pmc)337 rdpmc(u_int pmc)
338 {
339 uint32_t hi, lo;
340
341 __asm volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
342 return (((uint64_t)hi << 32) | (uint64_t) lo);
343 }
344
/*
 * monitor: arm the MONITOR/MWAIT hardware on the address range
 * containing addr, with the given ISA-defined extensions and hints.
 */
static __inline void
monitor(const volatile void *addr, u_long extensions, u_int hints)
{

	__asm volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

/*
 * mwait: wait for a store to the monitored range (or another wakeup
 * event).  The hand-written sequence after the mwait itself builds
 * nested call/pause/lfence capture loops and then discards the 16*8
 * bytes of return addresses it pushed.
 * NOTE(review): this looks like a return-stack (RSB) refill /
 * anti-speculation mitigation executed on wakeup — confirm against
 * OpenBSD commit history before relying on that description.
 */
static __inline void
mwait(u_long extensions, u_int hints)
{

	__asm volatile(
	    "	mwait			;"
	    "	mov	$8,%%rcx	;"
	    "	.align	16,0x90		;"
	    "3:	call	5f		;"
	    "4:	pause			;"
	    "	lfence			;"
	    "	call	4b		;"
	    "	.align	16,0xcc		;"
	    "5:	call	7f		;"
	    "6:	pause			;"
	    "	lfence			;"
	    "	call	6b		;"
	    "	.align	16,0xcc		;"
	    "7:	loop	3b		;"
	    "	add	$(16*8),%%rsp"
	    : "+c" (extensions) : "a" (hints));
}

/*
 * xsetbv: write extended control register `reg` (e.g. XCR0) with
 * `mask`, split across %edx:%eax as the instruction requires.
 */
static __inline void
xsetbv(uint32_t reg, uint64_t mask)
{
	uint32_t low, high;

	low = (uint32_t)mask;
	high = (uint32_t)(mask >> 32);
	__asm volatile("xsetbv" :: "c" (reg), "a" (low), "d" (high) : "memory");
}

/*
 * xgetbv: read extended control register `reg` (e.g. XCR0); result is
 * delivered split across %edx:%eax.
 */
static __inline uint64_t
xgetbv(uint32_t reg)
{
	uint32_t low, high;

	__asm volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));

	return ((uint64_t)high << 32 | low);
}

/*
 * stgi: set the global interrupt flag (AMD SVM).
 */
static __inline void
stgi(void)
{
	__asm volatile("stgi");
}

/*
 * clgi: clear the global interrupt flag (AMD SVM).
 */
static __inline void
clgi(void)
{
	__asm volatile("clgi");
}

/* Break into DDB (software breakpoint trap). */
static __inline void
breakpoint(void)
{
	__asm volatile("int $3");
}

void	amd64_errata(struct cpu_info *);
void	cpu_ucode_setup(void);
void	cpu_ucode_apply(struct cpu_info *);

struct cpu_info_full;
void	cpu_enter_pages(struct cpu_info_full *);

int	rdmsr_safe(u_int msr, uint64_t *);

#endif /* _KERNEL */

#endif /* !_MACHINE_CPUFUNC_H_ */