1 /* $OpenBSD: cpufunc.h,v 1.32 2019/06/28 21:54:05 bluhm Exp $ */ 2 /* $NetBSD: cpufunc.h,v 1.8 1994/10/27 04:15:59 cgd Exp $ */ 3 4 /* 5 * Copyright (c) 1993 Charles Hannum. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Charles Hannum. 19 * 4. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #ifndef _MACHINE_CPUFUNC_H_ 35 #define _MACHINE_CPUFUNC_H_ 36 37 #ifdef _KERNEL 38 39 /* 40 * Functions to provide access to i386-specific instructions. 41 */ 42 43 #include <sys/types.h> 44 45 #include <machine/specialreg.h> 46 47 static __inline void invlpg(u_int); 48 static __inline void lidt(void *); 49 static __inline void lldt(u_short); 50 static __inline void ltr(u_short); 51 static __inline void lcr0(u_int); 52 static __inline u_int rcr0(void); 53 static __inline u_int rcr2(void); 54 static __inline void lcr3(u_int); 55 static __inline u_int rcr3(void); 56 static __inline void lcr4(u_int); 57 static __inline u_int rcr4(void); 58 static __inline void tlbflush(void); 59 static __inline u_int read_eflags(void); 60 static __inline void write_eflags(u_int); 61 static __inline void wbinvd(void); 62 static __inline void clflush(u_int32_t addr); 63 static __inline void mfence(void); 64 static __inline void wrmsr(u_int, u_int64_t); 65 static __inline u_int64_t rdmsr(u_int); 66 static __inline void breakpoint(void); 67 68 static __inline void 69 invlpg(u_int addr) 70 { 71 __asm volatile("invlpg (%0)" : : "r" (addr) : "memory"); 72 } 73 74 static __inline void 75 lidt(void *p) 76 { 77 __asm volatile("lidt (%0)" : : "r" (p) : "memory"); 78 } 79 80 static __inline void 81 lldt(u_short sel) 82 { 83 __asm volatile("lldt %0" : : "r" (sel)); 84 } 85 86 static __inline void 87 ltr(u_short sel) 88 { 89 __asm volatile("ltr %0" : : "r" (sel)); 90 } 91 92 static __inline void 93 lcr0(u_int val) 94 { 95 __asm volatile("movl %0,%%cr0" : : "r" (val)); 96 } 97 98 static __inline u_int 99 rcr0(void) 100 { 101 u_int val; 102 __asm volatile("movl %%cr0,%0" : "=r" (val)); 103 return val; 104 } 105 106 static __inline u_int 107 rcr2(void) 108 { 109 u_int val; 110 __asm volatile("movl %%cr2,%0" : "=r" (val)); 111 return val; 112 } 113 114 static __inline void 115 lcr3(u_int val) 116 { 117 __asm volatile("movl %0,%%cr3" : : "r" (val)); 118 } 119 120 static __inline u_int 121 rcr3(void) 122 { 123 u_int val; 124 __asm volatile("movl %%cr3,%0" : "=r" (val)); 125 return val; 126 } 127 128 static __inline void 129 lcr4(u_int val) 130 { 131 __asm volatile("movl %0,%%cr4" : : "r" (val)); 132 } 133 134 static __inline u_int 135 rcr4(void) 136 { 137 u_int val; 138 __asm volatile("movl %%cr4,%0" : "=r" (val)); 139 return val; 140 } 141 142 static __inline void 143 tlbflush(void) 144 { 145 u_int val; 146 __asm volatile("movl %%cr3,%0" : "=r" (val)); 147 __asm volatile("movl %0,%%cr3" : : "r" (val)); 148 } 149 150 #ifdef notyet 151 void setidt(int idx, /*XXX*/caddr_t func, int typ, int dpl); 152 #endif 153 154 155 /* XXXX ought to be in psl.h with spl() functions */ 156 157 static __inline u_int 158 read_eflags(void) 159 { 160 u_int ef; 161 162 __asm volatile("pushfl; popl %0" : "=r" (ef)); 163 return (ef); 164 } 165 166 static __inline void 167 write_eflags(u_int ef) 168 { 169 __asm volatile("pushl %0; popfl" : : "r" (ef)); 170 } 171 172 static inline void 173 intr_enable(void) 174 { 175 __asm volatile("sti"); 176 } 177 178 static inline u_long 179 intr_disable(void) 180 { 181 u_long ef; 182 183 ef = read_eflags(); 184 __asm volatile("cli"); 185 return (ef); 186 } 187 188 static inline void 189 intr_restore(u_long ef) 190 { 191 write_eflags(ef); 192 } 193 194 static __inline void 195 wbinvd(void) 196 { 197 __asm volatile("wbinvd" : : : "memory"); 198 } 199 200 static __inline void 201 clflush(u_int32_t addr) 202 { 203 __asm volatile("clflush %0" : "+m" (*(volatile char *)addr)); 204 } 205 206 static __inline void 207 mfence(void) 208 { 209 __asm volatile("mfence" : : : "memory"); 210 } 211 212 static __inline u_int64_t 213 rdtsc(void) 214 { 215 uint64_t tsc; 216 217 __asm volatile("rdtsc" : "=A" (tsc)); 218 return (tsc); 219 } 220 221 static __inline void 222 wrmsr(u_int msr, u_int64_t newval) 223 { 224 __asm volatile("wrmsr" : : "A" (newval), "c" (msr)); 225 } 226 227 static __inline u_int64_t 228 rdmsr(u_int msr) 229 { 230 u_int64_t rv; 231 232 __asm volatile("rdmsr" : "=A" (rv) : "c" (msr)); 233 return (rv); 234 } 235 236 static __inline void 237 monitor(const volatile void *addr, u_long extensions, u_int hints) 238 { 239 __asm volatile("monitor" 240 : : "a" (addr), "c" (extensions), "d" (hints)); 241 } 242 243 static __inline void 244 mwait(u_long extensions, u_int hints) 245 { 246 __asm volatile("mwait" : : "a" (hints), "c" (extensions)); 247 } 248 249 /* 250 * Some of the undocumented AMD64 MSRs need a 'passcode' to access. 251 * 252 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c 253 */ 254 255 #define OPTERON_MSR_PASSCODE 0x9c5a203a 256 257 static __inline u_int64_t 258 rdmsr_locked(u_int msr, u_int code) 259 { 260 uint64_t rv; 261 __asm volatile("rdmsr" 262 : "=A" (rv) 263 : "c" (msr), "D" (code)); 264 return (rv); 265 } 266 267 static __inline void 268 wrmsr_locked(u_int msr, u_int code, u_int64_t newval) 269 { 270 __asm volatile("wrmsr" 271 : 272 : "A" (newval), "c" (msr), "D" (code)); 273 } 274 275 /* Break into DDB. */ 276 static __inline void 277 breakpoint(void) 278 { 279 __asm volatile("int $3"); 280 } 281 282 void amd64_errata(struct cpu_info *); 283 void cpu_ucode_setup(void); 284 void cpu_ucode_apply(struct cpu_info *); 285 286 struct cpu_info_full; 287 void cpu_enter_pages(struct cpu_info_full *); 288 289 #endif /* _KERNEL */ 290 #endif /* !_MACHINE_CPUFUNC_H_ */ 291