/*	$OpenBSD: cpufunc.h,v 1.33 2020/09/13 11:53:16 jsg Exp $	*/
/*	$NetBSD: cpufunc.h,v 1.8 1994/10/27 04:15:59 cgd Exp $	*/

/*
 * Copyright (c) 1993 Charles Hannum.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifdef _KERNEL

/*
 * Functions to provide access to i386-specific instructions.
 */
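
/*
 * Most of these helpers wrap privileged instructions, so they are
 * usable only by code running at CPL 0 (kernel mode).
 */
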
#include <sys/types.h>

#include <machine/specialreg.h>

static __inline void invlpg(u_int);
static __inline void lidt(void *);
static __inline void lldt(u_short);
static __inline void ltr(u_short);
static __inline void lcr0(u_int);
static __inline u_int rcr0(void);
static __inline u_int rcr2(void);
static __inline void lcr3(u_int);
static __inline u_int rcr3(void);
static __inline void lcr4(u_int);
static __inline u_int rcr4(void);
static __inline void tlbflush(void);
static __inline u_int read_eflags(void);
static __inline void write_eflags(u_int);
static __inline void wbinvd(void);
static __inline void clflush(u_int32_t addr);
static __inline void mfence(void);
static __inline void wrmsr(u_int, u_int64_t);
static __inline u_int64_t rdmsr(u_int);
static __inline void breakpoint(void);

/* Invalidate the TLB entry for the page containing addr. */
static __inline void
invlpg(u_int addr)
{
	__asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

static __inline void
lidt(void *p)
{
	__asm volatile("lidt (%0)" : : "r" (p) : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr0(u_int val)
{
	__asm volatile("movl %0,%%cr0" : : "r" (val));
}

static __inline u_int
rcr0(void)
{
	u_int val;
	__asm volatile("movl %%cr0,%0" : "=r" (val));
	return val;
}

static __inline u_int
rcr2(void)
{
	u_int val;
	__asm volatile("movl %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int val)
{
	__asm volatile("movl %0,%%cr3" : : "r" (val));
}

static __inline u_int
rcr3(void)
{
	u_int val;
	__asm volatile("movl %%cr3,%0" : "=r" (val));
	return val;
}

static __inline void
lcr4(u_int val)
{
	__asm volatile("movl %0,%%cr4" : : "r" (val));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	__asm volatile("movl %%cr4,%0" : "=r" (val));
	return val;
}

/* Reloading %cr3 with its own value flushes all non-global TLB entries. */
static __inline void
tlbflush(void)
{
	u_int val;
	__asm volatile("movl %%cr3,%0" : "=r" (val));
	__asm volatile("movl %0,%%cr3" : : "r" (val));
}

#ifdef notyet
void	setidt(int idx, /*XXX*/caddr_t func, int typ, int dpl);
#endif

/* XXXX ought to be in psl.h with spl() functions */

static __inline u_int
read_eflags(void)
{
	u_int ef;

	__asm volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_eflags(u_int ef)
{
	__asm volatile("pushl %0; popfl" : : "r" (ef));
}

static inline void
intr_enable(void)
{
	__asm volatile("sti");
}

/* Disable interrupts, returning the previous EFLAGS for intr_restore(). */
static inline u_long
intr_disable(void)
{
	u_long ef;

	ef = read_eflags();
	__asm volatile("cli");
	return (ef);
}

static inline void
intr_restore(u_long ef)
{
	write_eflags(ef);
}

/* Write back all modified cache lines, then invalidate the caches. */
static __inline void
wbinvd(void)
{
	__asm volatile("wbinvd" : : : "memory");
}

#ifdef MULTIPROCESSOR
int wbinvd_on_all_cpus(void);
#else
static inline int
wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}
#endif

/* Flush the cache line containing addr from the whole cache hierarchy. */
static __inline void
clflush(u_int32_t addr)
{
	__asm volatile("clflush %0" : "+m" (*(volatile char *)addr));
}

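/*
 * mfence orders all earlier loads and stores before any later memory
 * access.  clflush, above, is ordered only by mfence, so a flush loop
 * is normally followed by one.
 */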
static __inline void
mfence(void)
{
	__asm volatile("mfence" : : : "memory");
}

/* Read the 64-bit time-stamp counter; "=A" binds the %edx:%eax pair. */
static __inline u_int64_t
rdtsc(void)
{
	uint64_t tsc;

	__asm volatile("rdtsc" : "=A" (tsc));
	return (tsc);
}

/* Write the 64-bit value ("A" = %edx:%eax) to the MSR selected by %ecx. */
static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm volatile("wrmsr" : : "A" (newval), "c" (msr));
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int64_t rv;

	__asm volatile("rdmsr" : "=A" (rv) : "c" (msr));
	return (rv);
}

/* Arm the address monitor used by mwait on the line containing addr. */
static __inline void
monitor(const volatile void *addr, u_long extensions, u_int hints)
{
	__asm volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
mwait(u_long extensions, u_int hints)
{
	__asm volatile("mwait" : : "a" (hints), "c" (extensions));
}

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 *
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */

#define	OPTERON_MSR_PASSCODE	0x9c5a203a

/* As rdmsr/wrmsr, but with the AMD passcode loaded into %edi. */
static __inline u_int64_t
rdmsr_locked(u_int msr, u_int code)
{
	uint64_t rv;
	__asm volatile("rdmsr"
	    : "=A" (rv)
	    : "c" (msr), "D" (code));
	return (rv);
}

static __inline void
wrmsr_locked(u_int msr, u_int code, u_int64_t newval)
{
	__asm volatile("wrmsr"
	    :
	    : "A" (newval), "c" (msr), "D" (code));
}

/* Break into DDB. */
static __inline void
breakpoint(void)
{
	__asm volatile("int $3");
}

void amd64_errata(struct cpu_info *);
void cpu_ucode_setup(void);
void cpu_ucode_apply(struct cpu_info *);

struct cpu_info_full;
void cpu_enter_pages(struct cpu_info_full *);

#endif /* _KERNEL */
#endif /* !_MACHINE_CPUFUNC_H_ */