/*-
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008-2017 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
 */

//#include "use_npx.h"
#include "use_isa.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_msgbuf.h"
#include "opt_swap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/usched.h>
#include <sys/reg.h>
#include <sys/sbuf.h>
#include <sys/ctype.h>
#include <sys/serialize.h>
#include <sys/systimer.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <sys/mutex2.h>

#include <sys/exec.h>
#include <sys/cons.h>

#include <sys/efi.h>

#include <ddb/ddb.h>

#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#if 0 /* JG */
#include <machine/bootinfo.h>
#endif
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pc/bios.h>
#include <machine/pcb_ext.h>
#include <machine/globaldata.h>		/* CPU_prvspace */
#include <machine/smp.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/framebuffer.h>

#ifdef OLD_BUS_ARCH
#include <bus/isa/isa_device.h>
#endif
#include <machine_base/isa/isa_intr.h>
#include <bus/isa/rtc.h>
#include <sys/random.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

#include <sys/machintr.h>
#include <machine_base/icu/icu_abi.h>
#include <machine_base/icu/elcr_var.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/mptable.h>

#define PHYSMAP_ENTRIES		10
#define MAXBUFSTRUCTSIZE	((size_t)512 * 1024 * 1024)

extern u_int64_t hammer_time(u_int64_t, u_int64_t);

extern void printcpuinfo(void);	/* XXX header file */
extern void identify_cpu(void);
extern void panicifcpuunsupported(void);

static void cpu_startup(void *);
static void pic_finish(void *);
static void cpu_finish(void *);

static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
static void init_locks(void);

extern void pcpu_timer_always(struct intrframe *);

SYSINIT(cpu, SI_BOOT2_START_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
SYSINIT(pic_finish, SI_BOOT2_FINISH_PIC, SI_ORDER_FIRST, pic_finish, NULL);
SYSINIT(cpu_finish, SI_BOOT2_FINISH_CPU, SI_ORDER_FIRST, cpu_finish, NULL);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

struct privatespace CPU_prvspace_bsp __aligned(4096);
struct privatespace *CPU_prvspace[MAXCPU] = { &CPU_prvspace_bsp };

vm_paddr_t efi_systbl_phys;
int	_udatasel, _ucodesel, _ucode32sel;
u_long	atdevbase;
int64_t tsc_offsets[MAXCPU];
cpumask_t smp_idleinvl_mask;
cpumask_t smp_idleinvl_reqs;

static int cpu_mwait_halt_global; /* MWAIT hint (EAX) or CPU_MWAIT_HINT_ */

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
    CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
    CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif
SYSCTL_INT(_hw, OID_AUTO, cpu_mwait_halt,
    CTLFLAG_RD, &cpu_mwait_halt_global, 0, "");
SYSCTL_INT(_hw, OID_AUTO, cpu_mwait_spin,
    CTLFLAG_RD, &cpu_mwait_spin, 0, "monitor/mwait target state");

#define CPU_MWAIT_HAS_CX	\
	((cpu_feature2 & CPUID2_MON) && \
	 (cpu_mwait_feature & CPUID_MWAIT_EXT))

#define CPU_MWAIT_CX_NAMELEN	16

#define CPU_MWAIT_C1		1
#define CPU_MWAIT_C2		2
#define CPU_MWAIT_C3		3
#define CPU_MWAIT_CX_MAX	8

#define CPU_MWAIT_HINT_AUTO	-1	/* C1 and C2 */
#define CPU_MWAIT_HINT_AUTODEEP	-2	/* C3+ */

SYSCTL_NODE(_machdep, OID_AUTO, mwait, CTLFLAG_RW, 0, "MWAIT features");
SYSCTL_NODE(_machdep_mwait, OID_AUTO, CX, CTLFLAG_RW, 0, "MWAIT Cx settings");

struct cpu_mwait_cx {
	int			subcnt;
	char			name[4];
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
};
static struct cpu_mwait_cx	cpu_mwait_cx_info[CPU_MWAIT_CX_MAX];
static char			cpu_mwait_cx_supported[256];

static int			cpu_mwait_c1_hints_cnt;
static int			cpu_mwait_hints_cnt;
static int			*cpu_mwait_hints;

static int			cpu_mwait_deep_hints_cnt;
static int			*cpu_mwait_deep_hints;

#define CPU_IDLE_REPEAT_DEFAULT	750

static u_int cpu_idle_repeat = CPU_IDLE_REPEAT_DEFAULT;
static u_long cpu_idle_repeat_max = CPU_IDLE_REPEAT_DEFAULT;
static u_int cpu_mwait_repeat_shift = 1;

#define CPU_MWAIT_C3_PREAMBLE_BM_ARB	0x1
#define CPU_MWAIT_C3_PREAMBLE_BM_STS	0x2

static int cpu_mwait_c3_preamble =
    CPU_MWAIT_C3_PREAMBLE_BM_ARB |
    CPU_MWAIT_C3_PREAMBLE_BM_STS;

SYSCTL_STRING(_machdep_mwait_CX, OID_AUTO, supported, CTLFLAG_RD,
    cpu_mwait_cx_supported, 0, "MWAIT supported C states");
SYSCTL_INT(_machdep_mwait_CX, OID_AUTO, c3_preamble, CTLFLAG_RD,
    &cpu_mwait_c3_preamble, 0, "C3+ preamble mask");

static int	cpu_mwait_cx_select_sysctl(SYSCTL_HANDLER_ARGS,
		    int *, boolean_t);
static int	cpu_mwait_cx_idle_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpu_mwait_cx_pcpu_idle_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpu_mwait_cx_spin_sysctl(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_machdep_mwait_CX, OID_AUTO, idle, CTLTYPE_STRING|CTLFLAG_RW,
    NULL, 0, cpu_mwait_cx_idle_sysctl, "A", "");
SYSCTL_PROC(_machdep_mwait_CX, OID_AUTO, spin, CTLTYPE_STRING|CTLFLAG_RW,
    NULL, 0, cpu_mwait_cx_spin_sysctl, "A", "");
SYSCTL_UINT(_machdep_mwait_CX, OID_AUTO, repeat_shift, CTLFLAG_RW,
    &cpu_mwait_repeat_shift, 0, "");

long physmem = 0;

u_long ebda_addr = 0;

int imcr_present = 0;

int naps = 0;		/* # of Application Processors (APs) */

u_int base_memory;

static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	u_long pmem = ctob(physmem);
	int error;

	error = sysctl_handle_long(oidp, &pmem, 0, req);

	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_ULONG|CTLFLAG_RD,
    0, 0, sysctl_hw_physmem, "LU",
    "Total system memory in bytes (number of pages * page size)");

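/*
 * Worked example (illustrative, not from the source): with a 4KB PAGE_SIZE,
 * a machine with physmem = 4194304 pages reports ctob(physmem) =
 * 4194304 * 4096 bytes = 16 GiB through the hw.physmem sysctl above, while
 * hw.usermem below first subtracts the wired page count (vmstats.v_wire_count)
 * before converting to bytes.
 */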
static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	u_long usermem = ctob(physmem - vmstats.v_wire_count);
	int error;

	error = sysctl_handle_long(oidp, &usermem, 0, req);

	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_ULONG|CTLFLAG_RD,
    0, 0, sysctl_hw_usermem, "LU", "");

static int
sysctl_hw_availpages(SYSCTL_HANDLER_ARGS)
{
	int error;
	u_long availpages;

	availpages = x86_64_btop(avail_end - avail_start);
	error = sysctl_handle_long(oidp, &availpages, 0, req);

	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_ULONG|CTLFLAG_RD,
    0, 0, sysctl_hw_availpages, "LU", "");

vm_paddr_t Maxmem;
vm_paddr_t Realmem;

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
vm_phystable_t phys_avail[VM_PHYSSEG_MAX + 1];
vm_phystable_t dump_avail[VM_PHYSSEG_MAX + 1];

/* must be 1 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END	(NELEM(phys_avail) - 1)
#define DUMP_AVAIL_ARRAY_END	(NELEM(dump_avail) - 1)

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
static struct trapframe proc0_tf;

static void cpu_implement_smap(void);

static void
cpu_startup(void *dummy)
{
	caddr_t v;
	vm_size_t size = 0;
	vm_offset_t firstaddr;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	kprintf("%s", version);
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
	if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
		cpu_implement_smap();

	kprintf("real memory = %ju (%ju MB)\n",
		(intmax_t)Realmem,
		(intmax_t)Realmem / 1024 / 1024);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		kprintf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx].phys_end != 0; ++indx) {
			vm_paddr_t size1;

			size1 = phys_avail[indx].phys_end -
				phys_avail[indx].phys_beg;

			kprintf("0x%08jx - 0x%08jx, %ju bytes (%ju pages)\n",
				(intmax_t)phys_avail[indx].phys_beg,
				(intmax_t)phys_avail[indx].phys_end - 1,
				(intmax_t)size1,
				(intmax_t)(size1 / PAGE_SIZE));
		}
	}

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	/*
	 * Calculate nbuf such that maxbufspace uses approximately 1/20
	 * of physical memory by default, with a minimum of 50 buffers.
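	 *
	 * (Worked example, illustrative only and assuming NBUFCALCSIZE is
	 * roughly 16KB as the NOTE below indicates: a 16GB machine has
	 * kbytes ~= 16M, so nbuf ~= 50 + (16M - 128K) / (16 * 20) ~= 52000,
	 * giving maxbufspace ~= 52000 * 16KB ~= 815MB, about 1/20 of RAM.)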
	 *
	 * The calculation is made after discounting 128MB.
	 *
	 * NOTE: maxbufspace is (nbuf * NBUFCALCSIZE) (NBUFCALCSIZE ~= 16KB).
	 *	 nbuf = (kbytes / factor) would cover all of memory.
	 */
	if (nbuf == 0) {
		long factor = NBUFCALCSIZE / 1024;		/* KB/nbuf */
		long kbytes = physmem * (PAGE_SIZE / 1024);	/* physmem */

		nbuf = 50;
		if (kbytes > 128 * 1024)
			nbuf += (kbytes - 128 * 1024) / (factor * 20);
		if (maxbcache && nbuf > maxbcache / NBUFCALCSIZE)
			nbuf = maxbcache / NBUFCALCSIZE;
		if ((size_t)nbuf * sizeof(struct buf) > MAXBUFSTRUCTSIZE) {
			kprintf("Warning: nbuf capped at %ld due to the "
				"reasonability limit\n", nbuf);
			nbuf = MAXBUFSTRUCTSIZE / sizeof(struct buf);
		}
	}

	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (virtual_end - virtual_start +
		    virtual2_end - virtual2_start) / (MAXBSIZE * 2)) {
		nbuf = (virtual_end - virtual_start +
			virtual2_end - virtual2_start) / (MAXBSIZE * 2);
		kprintf("Warning: nbufs capped at %ld due to kvm\n", nbuf);
	}

	/*
	 * Do not allow the buffer_map to use more than 50% of available
	 * physical-equivalent memory.  Since the VM pages which back
	 * individual buffers are typically wired, having too many bufs
	 * can prevent the system from paging properly.
	 */
	if (nbuf > physmem * PAGE_SIZE / (NBUFCALCSIZE * 2)) {
		nbuf = physmem * PAGE_SIZE / (NBUFCALCSIZE * 2);
		kprintf("Warning: nbufs capped at %ld due to physmem\n", nbuf);
	}

	/*
	 * Do not allow the sizeof(struct buf) * nbuf to exceed 1/4 of
	 * the valloc space which is just the virtual_end - virtual_start
	 * section.  This is typically ~2GB regardless of the amount of
	 * memory, so we use 500MB as a metric.
	 *
	 * This is because we use valloc() to allocate the buf header array.
	 *
	 * NOTE: buffer space in bytes is limited by vfs.*bufspace sysctls.
	 */
	if (nbuf > (virtual_end - virtual_start) / (sizeof(struct buf) * 4)) {
		nbuf = (virtual_end - virtual_start) /
		       (sizeof(struct buf) * 4);
		kprintf("Warning: nbufs capped at %ld due to "
			"valloc considerations\n",
			nbuf);
	}

	nswbuf_mem = lmax(lmin(nbuf / 32, 512), 8);
#ifdef NSWBUF_MIN
	if (nswbuf_mem < NSWBUF_MIN)
		nswbuf_mem = NSWBUF_MIN;
#endif
	nswbuf_kva = lmax(lmin(nbuf / 4, 512), 16);
#ifdef NSWBUF_MIN
	if (nswbuf_kva < NSWBUF_MIN)
		nswbuf_kva = NSWBUF_MIN;
#endif

	valloc(swbuf_mem, struct buf, nswbuf_mem);
	valloc(swbuf_kva, struct buf, nswbuf_kva);
	valloc(buf, struct buf, nbuf);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = kmem_alloc(&kernel_map, round_page(size),
				       VM_SUBSYS_BUF);
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 *
	 * nbuf is an int, make sure we don't overflow the field.
	 *
	 * On 64-bit systems we always reserve maximal allocations for
	 * buffer cache buffers and there are no fragmentation issues,
	 * so the KVA segment does not have to be excessively oversized.
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	kmem_suballoc(&kernel_map, &clean_map, &clean_sva, &clean_eva,
		      ((vm_offset_t)(nbuf + 16) * MAXBSIZE) +
		      ((nswbuf_mem + nswbuf_kva) * MAXPHYS) + pager_map_size);
	kmem_suballoc(&clean_map, &buffer_map, &buffer_sva, &buffer_eva,
		      ((vm_offset_t)(nbuf + 16) * MAXBSIZE));
	buffer_map.system_map = 1;
	kmem_suballoc(&clean_map, &pager_map, &pager_sva, &pager_eva,
		      ((vm_offset_t)(nswbuf_mem + nswbuf_kva) * MAXPHYS) +
		      pager_map_size);
	pager_map.system_map = 1;
	kprintf("avail memory = %ju (%ju MB)\n",
		(uintmax_t)ptoa(vmstats.v_free_count + vmstats.v_dma_pages),
		(uintmax_t)ptoa(vmstats.v_free_count + vmstats.v_dma_pages) /
		1024 / 1024);
}

struct cpu_idle_stat {
	int	hint;
	int	reserved;
	u_long	halt;
	u_long	spin;
	u_long	repeat;
	u_long	repeat_last;
	u_long	repeat_delta;
	u_long	mwait_cx[CPU_MWAIT_CX_MAX];
} __cachealign;

#define CPU_IDLE_STAT_HALT	-1
#define CPU_IDLE_STAT_SPIN	-2

static struct cpu_idle_stat	cpu_idle_stats[MAXCPU];

static int
sysctl_cpu_idle_cnt(SYSCTL_HANDLER_ARGS)
{
	int idx = arg2, cpu, error;
	u_long val = 0;

	if (idx == CPU_IDLE_STAT_HALT) {
		for (cpu = 0; cpu < ncpus; ++cpu)
			val += cpu_idle_stats[cpu].halt;
	} else if (idx == CPU_IDLE_STAT_SPIN) {
		for (cpu = 0; cpu < ncpus; ++cpu)
			val += cpu_idle_stats[cpu].spin;
	} else {
		KASSERT(idx >= 0 && idx < CPU_MWAIT_CX_MAX,
		    ("invalid index %d", idx));
		for (cpu = 0; cpu < ncpus; ++cpu)
			val += cpu_idle_stats[cpu].mwait_cx[idx];
	}

	error = sysctl_handle_quad(oidp, &val, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (idx == CPU_IDLE_STAT_HALT) {
		for (cpu = 0; cpu < ncpus; ++cpu)
			cpu_idle_stats[cpu].halt = 0;
		cpu_idle_stats[0].halt = val;
	} else if (idx == CPU_IDLE_STAT_SPIN) {
		for (cpu = 0; cpu < ncpus; ++cpu)
			cpu_idle_stats[cpu].spin = 0;
		cpu_idle_stats[0].spin = val;
	} else {
		KASSERT(idx >= 0 && idx < CPU_MWAIT_CX_MAX,
		    ("invalid index %d", idx));
		for (cpu = 0; cpu < ncpus; ++cpu)
			cpu_idle_stats[cpu].mwait_cx[idx] = 0;
		cpu_idle_stats[0].mwait_cx[idx] = val;
	}
	return 0;
}

static void
cpu_mwait_attach(void)
{
	struct sbuf sb;
	int hint_idx, i;

	if (!CPU_MWAIT_HAS_CX)
		return;

	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (CPUID_TO_FAMILY(cpu_id) > 0xf ||
	     (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	      CPUID_TO_MODEL(cpu_id) >= 0xf))) {
		int bm_sts = 1;

		/*
		 * Pentium dual-core, Core 2 and beyond do not need any
		 * additional activities to enter deep C-state, i.e. C3(+).
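		 *
		 * (The calls below presumably clear the corresponding
		 * CPU_MWAIT_C3_PREAMBLE_BM_ARB / _BM_STS bits defined
		 * earlier, so no bus-master arbitration disable or
		 * bus-master status check is performed before C3+ entry;
		 * this is an editorial note, not from the original source.)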
		 */
		cpu_mwait_cx_no_bmarb();

		TUNABLE_INT_FETCH("machdep.cpu.mwait.bm_sts", &bm_sts);
		if (!bm_sts)
			cpu_mwait_cx_no_bmsts();
	}

	sbuf_new(&sb, cpu_mwait_cx_supported,
	    sizeof(cpu_mwait_cx_supported), SBUF_FIXEDLEN);

	for (i = 0; i < CPU_MWAIT_CX_MAX; ++i) {
		struct cpu_mwait_cx *cx = &cpu_mwait_cx_info[i];
		int sub;

		ksnprintf(cx->name, sizeof(cx->name), "C%d", i);

		sysctl_ctx_init(&cx->sysctl_ctx);
		cx->sysctl_tree = SYSCTL_ADD_NODE(&cx->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_machdep_mwait), OID_AUTO,
		    cx->name, CTLFLAG_RW, NULL, "Cx control/info");
		if (cx->sysctl_tree == NULL)
			continue;

		cx->subcnt = CPUID_MWAIT_CX_SUBCNT(cpu_mwait_extemu, i);
		SYSCTL_ADD_INT(&cx->sysctl_ctx,
		    SYSCTL_CHILDREN(cx->sysctl_tree), OID_AUTO,
		    "subcnt", CTLFLAG_RD, &cx->subcnt, 0,
		    "sub-state count");
		SYSCTL_ADD_PROC(&cx->sysctl_ctx,
		    SYSCTL_CHILDREN(cx->sysctl_tree), OID_AUTO,
		    "entered", (CTLTYPE_QUAD | CTLFLAG_RW), 0,
		    i, sysctl_cpu_idle_cnt, "Q", "# of times entered");

		for (sub = 0; sub < cx->subcnt; ++sub)
			sbuf_printf(&sb, "C%d/%d ", i, sub);
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);

	/*
	 * Non-deep C-states
	 */
	cpu_mwait_c1_hints_cnt = cpu_mwait_cx_info[CPU_MWAIT_C1].subcnt;
	for (i = CPU_MWAIT_C1; i < CPU_MWAIT_C3; ++i)
		cpu_mwait_hints_cnt += cpu_mwait_cx_info[i].subcnt;
	cpu_mwait_hints = kmalloc(sizeof(int) * cpu_mwait_hints_cnt,
	    M_DEVBUF, M_WAITOK);

	hint_idx = 0;
	for (i = CPU_MWAIT_C1; i < CPU_MWAIT_C3; ++i) {
		int j, subcnt;

		subcnt = cpu_mwait_cx_info[i].subcnt;
		for (j = 0; j < subcnt; ++j) {
			KASSERT(hint_idx < cpu_mwait_hints_cnt,
			    ("invalid mwait hint index %d", hint_idx));
			cpu_mwait_hints[hint_idx] = MWAIT_EAX_HINT(i, j);
			++hint_idx;
		}
	}
	KASSERT(hint_idx == cpu_mwait_hints_cnt,
	    ("mwait hint count %d != index %d",
	     cpu_mwait_hints_cnt, hint_idx));

	if (bootverbose) {
		kprintf("MWAIT hints (%d C1 hints):\n", cpu_mwait_c1_hints_cnt);
		for (i = 0; i < cpu_mwait_hints_cnt; ++i) {
			int hint = cpu_mwait_hints[i];

			kprintf("  C%d/%d hint 0x%04x\n",
			    MWAIT_EAX_TO_CX(hint), MWAIT_EAX_TO_CX_SUB(hint),
			    hint);
		}
	}

	/*
	 * Deep C-states
	 */
	for (i = CPU_MWAIT_C1; i < CPU_MWAIT_CX_MAX; ++i)
		cpu_mwait_deep_hints_cnt += cpu_mwait_cx_info[i].subcnt;
	cpu_mwait_deep_hints = kmalloc(sizeof(int) * cpu_mwait_deep_hints_cnt,
	    M_DEVBUF, M_WAITOK);

	hint_idx = 0;
	for (i = CPU_MWAIT_C1; i < CPU_MWAIT_CX_MAX; ++i) {
		int j, subcnt;

		subcnt = cpu_mwait_cx_info[i].subcnt;
		for (j = 0; j < subcnt; ++j) {
			KASSERT(hint_idx < cpu_mwait_deep_hints_cnt,
			    ("invalid mwait deep hint index %d", hint_idx));
			cpu_mwait_deep_hints[hint_idx] = MWAIT_EAX_HINT(i, j);
			++hint_idx;
		}
	}
	KASSERT(hint_idx == cpu_mwait_deep_hints_cnt,
	    ("mwait deep hint count %d != index %d",
	     cpu_mwait_deep_hints_cnt, hint_idx));

	if (bootverbose) {
		kprintf("MWAIT deep hints:\n");
		for (i = 0; i < cpu_mwait_deep_hints_cnt; ++i) {
			int hint = cpu_mwait_deep_hints[i];

			kprintf("  C%d/%d hint 0x%04x\n",
			    MWAIT_EAX_TO_CX(hint), MWAIT_EAX_TO_CX_SUB(hint),
			    hint);
		}
	}
	cpu_idle_repeat_max = 256 * cpu_mwait_deep_hints_cnt;

	for (i = 0; i < ncpus; ++i) {
		char name[16];

		ksnprintf(name, sizeof(name), "idle%d", i);
		SYSCTL_ADD_PROC(NULL,
		    SYSCTL_STATIC_CHILDREN(_machdep_mwait_CX), OID_AUTO,
		    name, (CTLTYPE_STRING | CTLFLAG_RW), &cpu_idle_stats[i],
		    0, cpu_mwait_cx_pcpu_idle_sysctl, "A", "");
	}
}

static void
cpu_finish(void *dummy __unused)
{
	cpu_setregs();
	cpu_mwait_attach();
}

static void
pic_finish(void *dummy __unused)
{
	/* Log ELCR information */
	elcr_dump();

	/* Log MPTABLE information */
	mptable_pci_int_dump();

	/* Finalize PCI */
	MachIntrABI.finalize();
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	struct sigacts *psp = p->p_sigacts;
	struct sigframe sf, *sfp;
	int oonstack;
	char *sp;

	regs = lp->lwp_md.md_regs;
	oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;

	/* Save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = lp->lwp_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
	KKASSERT(__offsetof(struct trapframe, tf_rdi) == 0);
	/* gcc errors out on optimized bcopy */
	_bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(struct trapframe));

	/* Make the size of the saved context visible to userland */
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext);

	/* Allocate and validate space for the signal handler context. */
	if ((lp->lwp_flags & LWP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (char *)lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size -
		     sizeof(struct sigframe);
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		/* We take red zone into account */
		sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
	}

	/*
	 * XXX AVX needs 64-byte alignment but sigframe has other fields and
	 * the embedded ucontext is not at the front, so aligning this won't
	 * help us.  Fortunately we bcopy in/out of the sigframe, so the
	 * kernel is ok.
	 *
	 * The problem though is if userland winds up trying to use the
	 * context directly.
	 */
	sfp = (struct sigframe *)((intptr_t)sp & ~(intptr_t)0xF);

	/* Translate the signal if appropriate */
	if (p->p_sysent->sv_sigtbl) {
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
	}

	/*
	 * Build the argument list for the signal handler.
	 *
	 * Arguments are in registers (%rdi, %rsi, %rdx, %rcx)
	 */
	regs->tf_rdi = sig;				/* argument 1 */
	regs->tf_rdx = (register_t)&sfp->sf_uc;		/* argument 3 */

	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 *
		 * action(signo, siginfo, ucontext)
		 */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_addr; /* argument 4 */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_pid = psp->ps_frominfo[sig].pid;
		sf.sf_si.si_uid = psp->ps_frominfo[sig].uid;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_addr;
	} else {
		/*
		 * Old FreeBSD-style arguments.
		 *
		 * handler (signo, code, [uc], addr)
		 */
		regs->tf_rsi = (register_t)code;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_addr; /* argument 4 */
		sf.sf_ahu.sf_handler = catcher;
	}

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
#if 0 /* JG */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}
#endif

	/*
	 * Save the FPU state and reinit the FP unit
	 */
	npxpush(&sf.sf_uc.uc_mcontext);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(lp, SIGILL);
	}

	regs->tf_rsp = (register_t)sfp;
	regs->tf_rip = trunc_page64(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
	regs->tf_rip -= SZSIGCODE_EXTRA_BYTES;

	/*
	 * x86 abi specifies that the direction flag must be cleared
	 * on function entry
	 */
	regs->tf_rflags &= ~(PSL_T | PSL_D);

	/*
	 * 64 bit mode has a code and stack selector but
	 * no data or extra selector.  %fs and %gs are not
	 * stored in-context.
	 */
	regs->tf_cs = _ucodesel;
	regs->tf_ss = _udatasel;
	clear_quickret();
}

/*
 * Sanitize the trapframe for a virtual kernel passing control to a custom
 * VM context.  Remove any items that would otherwise create a privilege
 * issue.
 *
 * XXX at the moment we allow userland to set the resume flag.  Is this a
 * bad idea?
 */
int
cpu_sanitize_frame(struct trapframe *frame)
{
	frame->tf_cs = _ucodesel;
	frame->tf_ss = _udatasel;
	/* XXX VM (8086) mode not supported? */
	frame->tf_rflags &= (PSL_RF | PSL_USERCHANGE | PSL_VM_UNSUPP);
	frame->tf_rflags |= PSL_RESERVED_DEFAULT | PSL_I;

	return(0);
}

/*
 * Sanitize the tls so loading the descriptor does not blow up
 * on us.  For x86_64 we don't have to do anything.
 */
int
cpu_sanitize_tls(struct savetls *tls)
{
	return(0);
}

/*
 * sigreturn(ucontext_t *sigcntxp)
 *
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)

int
sys_sigreturn(struct sigreturn_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	ucontext_t uc;
	ucontext_t *ucp;
	register_t rflags;
	int cs;
	int error;

	/*
	 * We have to copy the information into kernel space so userland
	 * can't modify it while we are sniffing it.
	 */
	regs = lp->lwp_md.md_regs;
	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error)
		return (error);
	ucp = &uc;
	rflags = ucp->uc_mcontext.mc_rflags;

	/* VM (8086) mode not supported */
	rflags &= ~PSL_VM_UNSUPP;

#if 0 /* JG */
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (lp->lwp_thread->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(lp, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_gs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = tf->tf_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
		tf->tf_gs = _udatasel;
	} else
#endif
	{
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
			kprintf("sigreturn: rflags = 0x%lx\n", (long)rflags);
			return(EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
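		 *
		 * (For reference: CS_SECURE() above accepts only a selector
		 * whose privilege level is SEL_UPL, i.e. a user-mode %cs,
		 * and EFL_SECURE() accepts only rflags whose differences
		 * from the saved flags are confined to the PSL_USERCHANGE
		 * bits.)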
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			kprintf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(lp, SIGBUS, T_PROTFLT);
			return(EINVAL);
		}
		/* gcc errors out on optimized bcopy */
		_bcopy(&ucp->uc_mcontext.mc_rdi, regs,
		       sizeof(struct trapframe));
	}

	/*
	 * Restore the FPU state from the frame
	 */
	crit_enter();
	npxpop(&ucp->uc_mcontext);

	if (ucp->uc_mcontext.mc_onstack & 1)
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	else
		lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK;

	lp->lwp_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(lp->lwp_sigmask);
	clear_quickret();
	crit_exit();
	return(EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ __volatile("hlt");
}

/*
 * cpu_idle() represents the idle LWKT.  You cannot return from this function
 * (unless you want to blow things up!).  Instead we look for runnable threads
 * and loop or halt as appropriate.  Giant is not held on entry to the thread.
 *
 * The main loop is entered with a critical section held, we must release
 * the critical section before doing anything else.  lwkt_switch() will
 * check for pending interrupts due to entering and exiting its own
 * critical section.
 *
 * NOTE: On an SMP system we rely on a scheduler IPI to wake a HLTed cpu up.
 *	 However, there are cases where the idlethread will be entered with
 *	 the possibility that no IPI will occur and in such cases
 *	 lwkt_switch() sets TDF_IDLE_NOHLT.
 *
 * NOTE: cpu_idle_repeat determines how many entries into the idle thread
 *	 must occur before it starts using ACPI halt.
 *
 * NOTE: Value overridden in hammer_time().
 */
static int	cpu_idle_hlt = 2;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_repeat, CTLFLAG_RW,
    &cpu_idle_repeat, 0, "Idle entries before acpi hlt");

SYSCTL_PROC(_machdep, OID_AUTO, cpu_idle_hltcnt, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, CPU_IDLE_STAT_HALT, sysctl_cpu_idle_cnt, "Q", "Idle loop entry halts");
SYSCTL_PROC(_machdep, OID_AUTO, cpu_idle_spincnt, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, CPU_IDLE_STAT_SPIN, sysctl_cpu_idle_cnt, "Q", "Idle loop entry spins");

static void
cpu_idle_default_hook(void)
{
	/*
	 * We must guarantee that hlt is exactly the instruction
	 * following the sti.
	 */
	__asm __volatile("sti; hlt");
}

/* Other subsystems (e.g., ACPI) can hook this later.
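 * A hypothetical consumer would simply install its own handler, e.g.
 * "cpu_idle_hook = my_acpi_idle;" at attach time, and put
 * cpu_idle_default_hook back when detaching (names illustrative only,
 * not from this file).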
 */
void (*cpu_idle_hook)(void) = cpu_idle_default_hook;

static __inline int
cpu_mwait_cx_hint(struct cpu_idle_stat *stat)
{
	int hint, cx_idx;
	u_int idx;

	hint = stat->hint;
	if (hint >= 0)
		goto done;

	idx = (stat->repeat + stat->repeat_last + stat->repeat_delta) >>
	    cpu_mwait_repeat_shift;
	if (idx >= cpu_mwait_c1_hints_cnt) {
		/* Step up faster, once we walked through all C1 states */
		stat->repeat_delta += 1 << (cpu_mwait_repeat_shift + 1);
	}
	if (hint == CPU_MWAIT_HINT_AUTODEEP) {
		if (idx >= cpu_mwait_deep_hints_cnt)
			idx = cpu_mwait_deep_hints_cnt - 1;
		hint = cpu_mwait_deep_hints[idx];
	} else {
		if (idx >= cpu_mwait_hints_cnt)
			idx = cpu_mwait_hints_cnt - 1;
		hint = cpu_mwait_hints[idx];
	}
done:
	cx_idx = MWAIT_EAX_TO_CX(hint);
	if (cx_idx >= 0 && cx_idx < CPU_MWAIT_CX_MAX)
		stat->mwait_cx[cx_idx]++;
	return hint;
}

void
cpu_idle(void)
{
	globaldata_t gd = mycpu;
	struct cpu_idle_stat *stat = &cpu_idle_stats[gd->gd_cpuid];
	struct thread *td __debugvar = gd->gd_curthread;
	int reqflags;

	stat->repeat = stat->repeat_last = cpu_idle_repeat_max;

	crit_exit();
	KKASSERT(td->td_critcount == 0);

	for (;;) {
		/*
		 * See if there are any LWKTs ready to go.
		 */
		lwkt_switch();

		/*
		 * When halting inside a cli we must check for reqflags
		 * races, particularly [re]schedule requests.  Running
		 * splz() does the job.
		 *
		 * cpu_idle_hlt:
		 *	0	Never halt, just spin
		 *
		 *	1	Always use MONITOR/MWAIT if avail, HLT
		 *		otherwise.
		 *
		 *		Better default for modern (Haswell+) Intel
		 *		cpus.
		 *
		 *	2	Use HLT/MONITOR/MWAIT up to a point and then
		 *		use the ACPI halt (default).  This is a hybrid
		 *		approach.  See machdep.cpu_idle_repeat.
		 *
		 *		Better default for modern AMD cpus and older
		 *		Intel cpus.
		 *
		 *	3	Always use the ACPI halt.  This typically
		 *		eats the least amount of power but the cpu
		 *		will be slow waking up.  Slows down e.g.
		 *		compiles and other pipe/event oriented stuff.
		 *
		 *		Usually the best default for AMD cpus.
		 *
		 *	4	Always use HLT.
		 *
		 *	5	Always spin.
		 *
		 * NOTE: Interrupts are enabled and we are not in a critical
		 *	 section.
		 *
		 * NOTE: Preemptions do not reset gd_idle_repeat.  Also we
		 *	 don't bother capping gd_idle_repeat, it is ok if
		 *	 it overflows (we do make it unsigned, however).
		 *
		 * Implement optimized invltlb operations when halted
		 * in idle.  By setting the bit in smp_idleinvl_mask
		 * we inform other cpus that they can set _reqs to
		 * request an invltlb.  Currently the code to do that
		 * sets the bits in _reqs anyway, but then checks _mask
		 * to determine if they can assume the invltlb will execute.
		 *
		 * A critical section is required to ensure that interrupts
		 * do not fully run until after we've had a chance to execute
		 * the request.
		 */
		if (gd->gd_idle_repeat == 0) {
			stat->repeat = (stat->repeat + stat->repeat_last) >> 1;
			if (stat->repeat > cpu_idle_repeat_max)
				stat->repeat = cpu_idle_repeat_max;
			stat->repeat_last = 0;
			stat->repeat_delta = 0;
		}
		++stat->repeat_last;

		/*
		 * General idle thread halt code
		 *
		 * IBRS NOTES - IBRS is a SPECTRE mitigation.
		 *	       When going idle, disable IBRS to reduce
		 *	       hyperthread overhead.
		 */
		++gd->gd_idle_repeat;

		switch(cpu_idle_hlt) {
		default:
		case 0:
			/*
			 * Always spin
			 */
			;
do_spin:
			splz();
			__asm __volatile("sti");
			stat->spin++;
			crit_enter_gd(gd);
			crit_exit_gd(gd);
			break;
		case 2:
			/*
			 * Use MONITOR/MWAIT (or HLT) for a few cycles,
			 * then start using the ACPI halt code if we
			 * continue to be idle.
			 */
			if (gd->gd_idle_repeat >= cpu_idle_repeat)
				goto do_acpi;
			/* FALL THROUGH */
		case 1:
			/*
			 * Always use MONITOR/MWAIT (will use HLT if
			 * MONITOR/MWAIT not available).
			 */
			if (cpu_mi_feature & CPU_MI_MONITOR) {
				splz(); /* XXX */
				reqflags = gd->gd_reqflags;
				if (reqflags & RQF_IDLECHECK_WK_MASK)
					goto do_spin;
				crit_enter_gd(gd);
				ATOMIC_CPUMASK_ORBIT(smp_idleinvl_mask, gd->gd_cpuid);
				/*
				 * IBRS/STIBP
				 */
				if (pscpu->trampoline.tr_pcb_spec_ctrl[1] &
				    SPEC_CTRL_DUMMY_ENABLE) {
					wrmsr(MSR_SPEC_CTRL, pscpu->trampoline.tr_pcb_spec_ctrl[1] & (SPEC_CTRL_IBRS|SPEC_CTRL_STIBP));
				}
				cpu_mmw_pause_int(&gd->gd_reqflags, reqflags,
						  cpu_mwait_cx_hint(stat), 0);
				if (pscpu->trampoline.tr_pcb_spec_ctrl[0] &
				    SPEC_CTRL_DUMMY_ENABLE) {
					wrmsr(MSR_SPEC_CTRL, pscpu->trampoline.tr_pcb_spec_ctrl[0] & (SPEC_CTRL_IBRS|SPEC_CTRL_STIBP));
				}
				stat->halt++;
				ATOMIC_CPUMASK_NANDBIT(smp_idleinvl_mask, gd->gd_cpuid);
				if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs,
							      gd->gd_cpuid)) {
					cpu_invltlb();
					cpu_mfence();
				}
				crit_exit_gd(gd);
				break;
			}
			/* FALLTHROUGH */
		case 4:
			/*
			 * Use HLT
			 */
			__asm __volatile("cli");
			splz();
			crit_enter_gd(gd);
			if ((gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
				ATOMIC_CPUMASK_ORBIT(smp_idleinvl_mask,
						     gd->gd_cpuid);
				if (pscpu->trampoline.tr_pcb_spec_ctrl[1] &
				    SPEC_CTRL_DUMMY_ENABLE) {
					wrmsr(MSR_SPEC_CTRL, pscpu->trampoline.tr_pcb_spec_ctrl[1] & (SPEC_CTRL_IBRS|SPEC_CTRL_STIBP));
				}
				cpu_idle_default_hook();
				if (pscpu->trampoline.tr_pcb_spec_ctrl[0] &
				    SPEC_CTRL_DUMMY_ENABLE) {
					wrmsr(MSR_SPEC_CTRL, pscpu->trampoline.tr_pcb_spec_ctrl[0] & (SPEC_CTRL_IBRS|SPEC_CTRL_STIBP));
				}
				ATOMIC_CPUMASK_NANDBIT(smp_idleinvl_mask,
						       gd->gd_cpuid);
				if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs,
							      gd->gd_cpuid)) {
					cpu_invltlb();
					cpu_mfence();
				}
			}
			__asm __volatile("sti");
			stat->halt++;
			crit_exit_gd(gd);
			break;
		case 3:
			/*
			 * Use ACPI halt
			 */
			;
do_acpi:
			__asm __volatile("cli");
			splz();
			crit_enter_gd(gd);
			if ((gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
				ATOMIC_CPUMASK_ORBIT(smp_idleinvl_mask,
						     gd->gd_cpuid);
				if (pscpu->trampoline.tr_pcb_spec_ctrl[1] &
				    SPEC_CTRL_DUMMY_ENABLE) {
					wrmsr(MSR_SPEC_CTRL, pscpu->trampoline.tr_pcb_spec_ctrl[1] & (SPEC_CTRL_IBRS|SPEC_CTRL_STIBP));
				}
				cpu_idle_hook();
				if (pscpu->trampoline.tr_pcb_spec_ctrl[0] &
				    SPEC_CTRL_DUMMY_ENABLE) {
					wrmsr(MSR_SPEC_CTRL, pscpu->trampoline.tr_pcb_spec_ctrl[0] & (SPEC_CTRL_IBRS|SPEC_CTRL_STIBP));
				}
				ATOMIC_CPUMASK_NANDBIT(smp_idleinvl_mask,
						       gd->gd_cpuid);
				if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs,
							      gd->gd_cpuid)) {
					cpu_invltlb();
					cpu_mfence();
				}
			}
			__asm __volatile("sti");
			stat->halt++;
			crit_exit_gd(gd);
			break;
		}
	}
}

/*
 * Called from deep ACPI via cpu_idle_hook() (see above) to actually halt
 * the cpu in C1.  ACPI might use other halt methods for deeper states
 * and not reach here.
 *
 * For now we always use HLT as we are not sure what ACPI may have actually
 * done.  MONITOR/MWAIT might not be appropriate.
 *
 * NOTE: MONITOR/MWAIT does not appear to throttle AMD cpus, while HLT
 *	 does.  On Intel, MONITOR/MWAIT does appear to throttle the cpu.
 */
void
cpu_idle_halt(void)
{
	globaldata_t gd;

	gd = mycpu;
#if 0
	/* DISABLED FOR NOW */
	struct cpu_idle_stat *stat;
	int reqflags;

	if ((cpu_idle_hlt == 1 || cpu_idle_hlt == 2) &&
	    (cpu_mi_feature & CPU_MI_MONITOR) &&
	    cpu_vendor_id != CPU_VENDOR_AMD) {
		/*
		 * Use MONITOR/MWAIT
		 *
		 * (NOTE: On ryzen, MWAIT does not throttle clocks, so we
		 *  have to use HLT)
		 */
		stat = &cpu_idle_stats[gd->gd_cpuid];
		reqflags = gd->gd_reqflags;
		if ((reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
			__asm __volatile("sti");
			cpu_mmw_pause_int(&gd->gd_reqflags, reqflags,
					  cpu_mwait_cx_hint(stat), 0);
		} else {
			__asm __volatile("sti; pause");
		}
	} else
#endif
	{
		/*
		 * Use HLT
		 */
		if ((gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
			__asm __volatile("sti; hlt");
		else
			__asm __volatile("sti; pause");
	}
}

/*
 * Called in a loop indirectly via Xcpustop
 */
void
cpu_smp_stopped(void)
{
	globaldata_t gd = mycpu;
	volatile __uint64_t *ptr;
	__uint64_t ovalue;

	ptr = CPUMASK_ADDR(started_cpus, gd->gd_cpuid);
	ovalue = *ptr;
	if ((ovalue & CPUMASK_SIMPLE(gd->gd_cpuid & 63)) == 0) {
		if (cpu_mi_feature & CPU_MI_MONITOR) {
			if (cpu_mwait_hints) {
				cpu_mmw_pause_long(__DEVOLATILE(void *, ptr),
						   ovalue,
						   cpu_mwait_hints[
						       cpu_mwait_hints_cnt - 1],
						   0);
			} else {
				cpu_mmw_pause_long(__DEVOLATILE(void *, ptr),
						   ovalue, 0, 0);
			}
		} else {
			cpu_halt();	/* depend on lapic timer */
		}
	}
}

/*
 * This routine is called if a spinlock has been held through the
 * exponential backoff period and is seriously contested.  On a real cpu
 * we let it spin.
 */
void
cpu_spinlock_contested(void)
{
	cpu_pause();
}

/*
 * Clear registers on exec
 */
void
exec_setregs(u_long entry, u_long stack, u_long ps_strings)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct pcb *pcb = td->td_pcb;
	struct trapframe *regs = lp->lwp_md.md_regs;

	user_ldt_free(pcb);

	clear_quickret();
	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = entry;
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;	/* align the stack */
	regs->tf_rdi = stack;				/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;
	regs->tf_rbx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;	/* JG set bit 10? */
		if (pcb == td->td_pcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * NOTE: do not set CR0_TS here.  npxinit() must do it after clearing
	 *	 gd_npxthread.  Otherwise a preemptive interrupt thread
	 *	 may panic in npxdna().
	 */
	crit_enter();
	load_cr0(rcr0() | CR0_MP);

	/*
	 * NOTE: The MSR values must be correct so we can return to
	 *	 userland.  gd_user_fs/gs must be correct so the switch
	 *	 code knows what the current MSR values are.
	 */
	pcb->pcb_fsbase = 0;	/* Values loaded from PCB on switch */
	pcb->pcb_gsbase = 0;
	mdcpu->gd_user_fs = 0;	/* Cache of current MSR values */
	mdcpu->gd_user_gs = 0;
	wrmsr(MSR_FSBASE, 0);	/* Set MSR values for return to userland */
	wrmsr(MSR_KGSBASE, 0);

	/* Initialize the npx (if any) for the current process. */
	npxinit();
	crit_exit();

	pcb->pcb_ds = _udatasel;
	pcb->pcb_es = _udatasel;
	pcb->pcb_fs = _udatasel;
	pcb->pcb_gs = _udatasel;
}

void
cpu_setregs(void)
{
	register_t cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
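	/*
	 * (Factual aside, not from the original source: CR0_WP makes
	 * supervisor-mode writes honor the page-level write-protect bit,
	 * and CR0_AM allows EFLAGS.AC-based alignment checking; both are
	 * simply enabled here.)
	 */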
	cr0 |= CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
    &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
    CTLFLAG_RW, &disable_rtc_set, 0, "");

#if 0 /* JG */
SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
    CTLFLAG_RD, &bootinfo, bootinfo, "");
#endif

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
    CTLFLAG_RW, &wall_cmos_clock, 0, "");

static int
efi_map_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct efi_map_header *efihdr;
	caddr_t kmdp;
	uint32_t efisize;

	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr == NULL)
		return (0);
	efisize = *((uint32_t *)efihdr - 1);
	return (SYSCTL_OUT(req, efihdr, efisize));
}
SYSCTL_PROC(_machdep, OID_AUTO, efi_map, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
    efi_map_sysctl_handler, "S,efi_map_header", "Raw EFI Memory Map");

/*
 * Initialize x86 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
struct user_segment_descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
struct gate_descriptor idt_arr[MAXCPU][NIDT];
#if 0 /* JG */
union descriptor ldt[NLDT];		/* local descriptor table */
#endif

/* table descriptors - used to load tables by cpu */
struct region_descriptor r_gdt;
struct region_descriptor r_idt_arr[MAXCPU];

/* JG proc0paddr is a virtual address */
void *proc0paddr;
/* JG alignment? */
char proc0paddr_buff[LWKT_THREAD_STACK];


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,			/* long */
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_KPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* long */
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_KPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* long */
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUCODE32_SEL	3 32 bit Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* long */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUDATA_SEL	4 32/64 bit Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* long */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUCODE_SEL	5 64 bit Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* long */
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	6 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct x86_64tss)-1,/* length - all address space */
	SDT_SYSTSS,		/* segment type */
	SEL_KPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* long */
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Actually, the TSS is a system descriptor which is double size */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,			/* long */
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUGS32_SEL	8 32 bit GS Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* long */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt_global(int idx, inthand_t *func, int typ, int dpl, int ist)
{
	int cpu;

	for (cpu = 0; cpu < MAXCPU; ++cpu) {
		struct gate_descriptor *ip = &idt_arr[cpu][idx];

		ip->gd_looffset = (uintptr_t)func;
		ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
		ip->gd_ist = ist;
		ip->gd_xx = 0;
		ip->gd_type = typ;
		ip->gd_dpl = dpl;
		ip->gd_p = 1;
		ip->gd_hioffset = ((uintptr_t)func)>>16 ;
	}
}

void
setidt(int idx, inthand_t *func, int typ, int dpl, int ist, int cpu)
{
	struct gate_descriptor *ip;

	KASSERT(cpu >= 0 && cpu < ncpus, ("invalid cpu %d", cpu));

	ip = &idt_arr[cpu][idx];
	ip->gd_looffset = (uintptr_t)func;
	ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	ip->gd_ist = ist;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((uintptr_t)func)>>16 ;
}

#define	IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(dblfault),
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

void
sdtossd(struct user_segment_descriptor *sd, struct soft_segment_descriptor *ssd)
{
	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;
}

void
ssdtosd(struct soft_segment_descriptor *ssd, struct user_segment_descriptor *sd)
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type = ssd->ssd_type;
	sd->sd_dpl = ssd->ssd_dpl;
	sd->sd_p = ssd->ssd_p;
	sd->sd_long = ssd->ssd_long;
	sd->sd_def32 = ssd->ssd_def32;
	sd->sd_gran = ssd->ssd_gran;
}

void
ssdtosyssd(struct soft_segment_descriptor *ssd,
    struct system_segment_descriptor *sd)
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type = ssd->ssd_type;
	sd->sd_dpl = ssd->ssd_dpl;
	sd->sd_p = ssd->ssd_p;
	sd->sd_gran = ssd->ssd_gran;
}

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * Memory is aligned to PHYSMAP_ALIGN which must be a multiple
 * of PAGE_SIZE.  This also greatly reduces the memory test time
 * which would otherwise be excessive on machines with > 8G of ram.
 *
 * XXX first should be vm_paddr_t.
1818 */ 1819 1820 #define PHYSMAP_ALIGN (vm_paddr_t)(128 * 1024) 1821 #define PHYSMAP_ALIGN_MASK (vm_paddr_t)(PHYSMAP_ALIGN - 1) 1822 #define PHYSMAP_SIZE VM_PHYSSEG_MAX 1823 1824 vm_paddr_t physmap[PHYSMAP_SIZE]; 1825 struct bios_smap *smapbase, *smap, *smapend; 1826 struct efi_map_header *efihdrbase; 1827 u_int32_t smapsize; 1828 1829 #define PHYSMAP_HANDWAVE (vm_paddr_t)(2 * 1024 * 1024) 1830 #define PHYSMAP_HANDWAVE_MASK (PHYSMAP_HANDWAVE - 1) 1831 1832 static void 1833 add_smap_entries(int *physmap_idx) 1834 { 1835 int i; 1836 1837 smapsize = *((u_int32_t *)smapbase - 1); 1838 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); 1839 1840 for (smap = smapbase; smap < smapend; smap++) { 1841 if (boothowto & RB_VERBOSE) 1842 kprintf("SMAP type=%02x base=%016lx len=%016lx\n", 1843 smap->type, smap->base, smap->length); 1844 1845 if (smap->type != SMAP_TYPE_MEMORY) 1846 continue; 1847 1848 if (smap->length == 0) 1849 continue; 1850 1851 for (i = 0; i <= *physmap_idx; i += 2) { 1852 if (smap->base < physmap[i + 1]) { 1853 if (boothowto & RB_VERBOSE) { 1854 kprintf("Overlapping or non-monotonic " 1855 "memory region, ignoring " 1856 "second region\n"); 1857 } 1858 break; 1859 } 1860 } 1861 if (i <= *physmap_idx) 1862 continue; 1863 1864 Realmem += smap->length; 1865 1866 if (smap->base == physmap[*physmap_idx + 1]) { 1867 physmap[*physmap_idx + 1] += smap->length; 1868 continue; 1869 } 1870 1871 *physmap_idx += 2; 1872 if (*physmap_idx == PHYSMAP_SIZE) { 1873 kprintf("Too many segments in the physical " 1874 "address map, giving up\n"); 1875 break; 1876 } 1877 physmap[*physmap_idx] = smap->base; 1878 physmap[*physmap_idx + 1] = smap->base + smap->length; 1879 } 1880 } 1881 1882 static void 1883 add_efi_map_entries(int *physmap_idx) 1884 { 1885 struct efi_md *map, *p; 1886 const char *type; 1887 size_t efisz; 1888 int i, ndesc; 1889 1890 static const char *types[] = { 1891 "Reserved", 1892 "LoaderCode", 1893 "LoaderData", 1894 "BootServicesCode", 1895 "BootServicesData", 1896 "RuntimeServicesCode", 1897 "RuntimeServicesData", 1898 "ConventionalMemory", 1899 "UnusableMemory", 1900 "ACPIReclaimMemory", 1901 "ACPIMemoryNVS", 1902 "MemoryMappedIO", 1903 "MemoryMappedIOPortSpace", 1904 "PalCode" 1905 }; 1906 1907 /* 1908 * Memory map data provided by UEFI via the GetMemoryMap 1909 * Boot Services API. 
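 * The header is followed by the descriptors themselves.  The firmware's
 * descriptor_size may be larger than sizeof(struct efi_md), which is why
 * the loop below computes ndesc from memory_size / descriptor_size and
 * steps with efi_next_descriptor() instead of plain pointer arithmetic.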
1910 */ 1911 efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf; 1912 map = (struct efi_md *)((uint8_t *)efihdrbase + efisz); 1913 1914 if (efihdrbase->descriptor_size == 0) 1915 return; 1916 ndesc = efihdrbase->memory_size / efihdrbase->descriptor_size; 1917 1918 if (boothowto & RB_VERBOSE) 1919 kprintf("%23s %12s %12s %8s %4s\n", 1920 "Type", "Physical", "Virtual", "#Pages", "Attr"); 1921 1922 for (i = 0, p = map; i < ndesc; i++, 1923 p = efi_next_descriptor(p, efihdrbase->descriptor_size)) { 1924 if (boothowto & RB_VERBOSE) { 1925 if (p->md_type <= EFI_MD_TYPE_PALCODE) 1926 type = types[p->md_type]; 1927 else 1928 type = "<INVALID>"; 1929 kprintf("%23s %012lx %12p %08lx ", type, p->md_phys, 1930 p->md_virt, p->md_pages); 1931 if (p->md_attr & EFI_MD_ATTR_UC) 1932 kprintf("UC "); 1933 if (p->md_attr & EFI_MD_ATTR_WC) 1934 kprintf("WC "); 1935 if (p->md_attr & EFI_MD_ATTR_WT) 1936 kprintf("WT "); 1937 if (p->md_attr & EFI_MD_ATTR_WB) 1938 kprintf("WB "); 1939 if (p->md_attr & EFI_MD_ATTR_UCE) 1940 kprintf("UCE "); 1941 if (p->md_attr & EFI_MD_ATTR_WP) 1942 kprintf("WP "); 1943 if (p->md_attr & EFI_MD_ATTR_RP) 1944 kprintf("RP "); 1945 if (p->md_attr & EFI_MD_ATTR_XP) 1946 kprintf("XP "); 1947 if (p->md_attr & EFI_MD_ATTR_RT) 1948 kprintf("RUNTIME"); 1949 kprintf("\n"); 1950 } 1951 1952 switch (p->md_type) { 1953 case EFI_MD_TYPE_CODE: 1954 case EFI_MD_TYPE_DATA: 1955 case EFI_MD_TYPE_BS_CODE: 1956 case EFI_MD_TYPE_BS_DATA: 1957 case EFI_MD_TYPE_FREE: 1958 /* 1959 * We're allowed to use any entry with these types. 1960 */ 1961 break; 1962 default: 1963 continue; 1964 } 1965 1966 Realmem += p->md_pages * PAGE_SIZE; 1967 1968 if (p->md_phys == physmap[*physmap_idx + 1]) { 1969 physmap[*physmap_idx + 1] += p->md_pages * PAGE_SIZE; 1970 continue; 1971 } 1972 1973 *physmap_idx += 2; 1974 if (*physmap_idx == PHYSMAP_SIZE) { 1975 kprintf("Too many segments in the physical " 1976 "address map, giving up\n"); 1977 break; 1978 } 1979 physmap[*physmap_idx] = p->md_phys; 1980 physmap[*physmap_idx + 1] = p->md_phys + p->md_pages * PAGE_SIZE; 1981 } 1982 } 1983 1984 struct fb_info efi_fb_info; 1985 static int have_efi_framebuffer = 0; 1986 1987 static void 1988 efi_fb_init_vaddr(int direct_map) 1989 { 1990 uint64_t sz; 1991 vm_offset_t addr, v; 1992 1993 v = efi_fb_info.vaddr; 1994 sz = efi_fb_info.stride * efi_fb_info.height; 1995 1996 if (direct_map) { 1997 addr = PHYS_TO_DMAP(efi_fb_info.paddr); 1998 if (addr >= DMAP_MIN_ADDRESS && addr + sz < DMAP_MAX_ADDRESS) 1999 efi_fb_info.vaddr = addr; 2000 } else { 2001 efi_fb_info.vaddr = (vm_offset_t)pmap_mapdev_attr( 2002 efi_fb_info.paddr, sz, PAT_WRITE_COMBINING); 2003 } 2004 } 2005 2006 static u_int 2007 efifb_color_depth(struct efi_fb *efifb) 2008 { 2009 uint32_t mask; 2010 u_int depth; 2011 2012 mask = efifb->fb_mask_red | efifb->fb_mask_green | 2013 efifb->fb_mask_blue | efifb->fb_mask_reserved; 2014 if (mask == 0) 2015 return (0); 2016 for (depth = 1; mask != 1; depth++) 2017 mask >>= 1; 2018 return (depth); 2019 } 2020 2021 int 2022 probe_efi_fb(int early) 2023 { 2024 struct efi_fb *efifb; 2025 caddr_t kmdp; 2026 u_int depth; 2027 2028 if (have_efi_framebuffer) { 2029 if (!early && 2030 (efi_fb_info.vaddr == 0 || 2031 efi_fb_info.vaddr == PHYS_TO_DMAP(efi_fb_info.paddr))) 2032 efi_fb_init_vaddr(0); 2033 return 0; 2034 } 2035 2036 kmdp = preload_search_by_type("elf kernel"); 2037 if (kmdp == NULL) 2038 kmdp = preload_search_by_type("elf64 kernel"); 2039 efifb = (struct efi_fb *)preload_search_info(kmdp, 2040 MODINFO_METADATA | MODINFOMD_EFI_FB); 
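	/* The loader did not supply any EFI framebuffer metadata */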
2041 if (efifb == NULL) 2042 return 1; 2043 2044 depth = efifb_color_depth(efifb); 2045 /* 2046 * Our bootloader should already notice, when we won't be able to 2047 * use the UEFI framebuffer. 2048 */ 2049 if (depth != 24 && depth != 32) 2050 return 1; 2051 2052 have_efi_framebuffer = 1; 2053 2054 efi_fb_info.is_vga_boot_display = 1; 2055 efi_fb_info.width = efifb->fb_width; 2056 efi_fb_info.height = efifb->fb_height; 2057 efi_fb_info.depth = depth; 2058 efi_fb_info.stride = efifb->fb_stride * (depth / 8); 2059 efi_fb_info.paddr = efifb->fb_addr; 2060 if (early) { 2061 efi_fb_info.vaddr = 0; 2062 } else { 2063 efi_fb_init_vaddr(0); 2064 } 2065 efi_fb_info.fbops.fb_set_par = NULL; 2066 efi_fb_info.fbops.fb_blank = NULL; 2067 efi_fb_info.fbops.fb_debug_enter = NULL; 2068 efi_fb_info.device = NULL; 2069 2070 return 0; 2071 } 2072 2073 static void 2074 efifb_startup(void *arg) 2075 { 2076 probe_efi_fb(0); 2077 } 2078 2079 SYSINIT(efi_fb_info, SI_BOOT1_POST, SI_ORDER_FIRST, efifb_startup, NULL); 2080 2081 static void 2082 getmemsize(caddr_t kmdp, u_int64_t first) 2083 { 2084 int off, physmap_idx, pa_indx, da_indx; 2085 int i, j; 2086 vm_paddr_t pa; 2087 vm_paddr_t msgbuf_size; 2088 u_long physmem_tunable; 2089 pt_entry_t *pte; 2090 quad_t dcons_addr, dcons_size; 2091 2092 bzero(physmap, sizeof(physmap)); 2093 physmap_idx = 0; 2094 2095 /* 2096 * get memory map from INT 15:E820, kindly supplied by the loader. 2097 * 2098 * subr_module.c says: 2099 * "Consumer may safely assume that size value precedes data." 2100 * ie: an int32_t immediately precedes smap. 2101 */ 2102 efihdrbase = (struct efi_map_header *)preload_search_info(kmdp, 2103 MODINFO_METADATA | MODINFOMD_EFI_MAP); 2104 smapbase = (struct bios_smap *)preload_search_info(kmdp, 2105 MODINFO_METADATA | MODINFOMD_SMAP); 2106 if (smapbase == NULL && efihdrbase == NULL) 2107 panic("No BIOS smap or EFI map info from loader!"); 2108 2109 if (efihdrbase == NULL) 2110 add_smap_entries(&physmap_idx); 2111 else 2112 add_efi_map_entries(&physmap_idx); 2113 2114 base_memory = physmap[1] / 1024; 2115 /* make hole for AP bootstrap code */ 2116 physmap[1] = mp_bootaddress(base_memory); 2117 2118 /* Save EBDA address, if any */ 2119 ebda_addr = (u_long)(*(u_short *)(KERNBASE + 0x40e)); 2120 ebda_addr <<= 4; 2121 2122 /* 2123 * Maxmem isn't the "maximum memory", it's one larger than the 2124 * highest page of the physical address space. It should be 2125 * called something like "Maxphyspage". We may adjust this 2126 * based on ``hw.physmem'' and the results of the memory test. 2127 */ 2128 Maxmem = atop(physmap[physmap_idx + 1]); 2129 2130 #ifdef MAXMEM 2131 Maxmem = MAXMEM / 4; 2132 #endif 2133 2134 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable)) 2135 Maxmem = atop(physmem_tunable); 2136 2137 /* 2138 * Don't allow MAXMEM or hw.physmem to extend the amount of memory 2139 * in the system. 2140 */ 2141 if (Maxmem > atop(physmap[physmap_idx + 1])) 2142 Maxmem = atop(physmap[physmap_idx + 1]); 2143 2144 /* 2145 * Blowing out the DMAP will blow up the system. 2146 */ 2147 if (Maxmem > atop(DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS)) { 2148 kprintf("Limiting Maxmem due to DMAP size\n"); 2149 Maxmem = atop(DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS); 2150 } 2151 2152 if (atop(physmap[physmap_idx + 1]) != Maxmem && 2153 (boothowto & RB_VERBOSE)) { 2154 kprintf("Physical memory use set to %ldK\n", Maxmem * 4); 2155 } 2156 2157 /* 2158 * Call pmap initialization to make new kernel address space 2159 * 2160 * Mask off page 0. 
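 * (page 0 is masked off presumably because it holds the real-mode IVT
 * and BIOS data area and should not be handed out as ordinary RAM).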
2161 */ 2162 pmap_bootstrap(&first); 2163 physmap[0] = PAGE_SIZE; 2164 2165 /* 2166 * Align the physmap to PHYSMAP_ALIGN and cut out anything 2167 * exceeding Maxmem. 2168 */ 2169 for (i = j = 0; i <= physmap_idx; i += 2) { 2170 if (physmap[i+1] > ptoa(Maxmem)) 2171 physmap[i+1] = ptoa(Maxmem); 2172 physmap[i] = (physmap[i] + PHYSMAP_ALIGN_MASK) & 2173 ~PHYSMAP_ALIGN_MASK; 2174 physmap[i+1] = physmap[i+1] & ~PHYSMAP_ALIGN_MASK; 2175 2176 physmap[j] = physmap[i]; 2177 physmap[j+1] = physmap[i+1]; 2178 2179 if (physmap[i] < physmap[i+1]) 2180 j += 2; 2181 } 2182 physmap_idx = j - 2; 2183 2184 /* 2185 * Align anything else used in the validation loop. 2186 * 2187 * Also make sure that our 2MB kernel text+data+bss mappings 2188 * do not overlap potentially allocatable space. 2189 */ 2190 first = (first + PHYSMAP_ALIGN_MASK) & ~PHYSMAP_ALIGN_MASK; 2191 2192 /* 2193 * Size up each available chunk of physical memory. 2194 */ 2195 pa_indx = 0; 2196 da_indx = 0; 2197 phys_avail[pa_indx].phys_beg = physmap[0]; 2198 phys_avail[pa_indx].phys_end = physmap[0]; 2199 dump_avail[da_indx].phys_beg = 0; 2200 dump_avail[da_indx].phys_end = physmap[0]; 2201 pte = CMAP1; 2202 2203 /* 2204 * Get dcons buffer address 2205 */ 2206 if (kgetenv_quad("dcons.addr", &dcons_addr) == 0 || 2207 kgetenv_quad("dcons.size", &dcons_size) == 0) 2208 dcons_addr = 0; 2209 2210 /* 2211 * Validate the physical memory. The physical memory segments 2212 * have already been aligned to PHYSMAP_ALIGN which is a multiple 2213 * of PAGE_SIZE. 2214 * 2215 * We no longer perform an exhaustive memory test. Instead we 2216 * simply test the first and last word in each physmap[] 2217 * segment. 2218 */ 2219 for (i = 0; i <= physmap_idx; i += 2) { 2220 vm_paddr_t end; 2221 vm_paddr_t incr; 2222 2223 end = physmap[i + 1]; 2224 2225 for (pa = physmap[i]; pa < end; pa += incr) { 2226 int page_bad, full; 2227 volatile uint64_t *ptr = (uint64_t *)CADDR1; 2228 uint64_t tmp; 2229 2230 full = FALSE; 2231 2232 /* 2233 * Calculate incr. Just test the first and 2234 * last page in each physmap[] segment. 2235 */ 2236 if (pa == end - PAGE_SIZE) 2237 incr = PAGE_SIZE; 2238 else 2239 incr = end - pa - PAGE_SIZE; 2240 2241 /* 2242 * Make sure we don't skip blacked out areas. 2243 */ 2244 if (pa < 0x200000 && 0x200000 < end) { 2245 incr = 0x200000 - pa; 2246 } 2247 if (dcons_addr > 0 && 2248 pa < dcons_addr && 2249 dcons_addr < end) { 2250 incr = dcons_addr - pa; 2251 } 2252 2253 /* 2254 * Block out kernel memory as not available. 2255 */ 2256 if (pa >= 0x200000 && pa < first) { 2257 incr = first - pa; 2258 if (pa + incr > end) 2259 incr = end - pa; 2260 goto do_dump_avail; 2261 } 2262 2263 /* 2264 * Block out the dcons buffer if it exists. 2265 */ 2266 if (dcons_addr > 0 && 2267 pa >= trunc_page(dcons_addr) && 2268 pa < dcons_addr + dcons_size) { 2269 incr = dcons_addr + dcons_size - pa; 2270 incr = (incr + PAGE_MASK) & 2271 ~(vm_paddr_t)PAGE_MASK; 2272 if (pa + incr > end) 2273 incr = end - pa; 2274 goto do_dump_avail; 2275 } 2276 2277 page_bad = FALSE; 2278 2279 /* 2280 * Map the page non-cacheable for the memory 2281 * test. 2282 */ 2283 *pte = pa | 2284 kernel_pmap.pmap_bits[PG_V_IDX] | 2285 kernel_pmap.pmap_bits[PG_RW_IDX] | 2286 kernel_pmap.pmap_bits[PG_N_IDX]; 2287 cpu_invlpg(__DEVOLATILE(void *, ptr)); 2288 cpu_mfence(); 2289 2290 /* 2291 * Save original value for restoration later. 
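 * The patterns written below (alternating bits, all ones, all zeroes)
 * exercise every bit of the word in both states before the original
 * contents are put back.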
2292 */ 2293 tmp = *ptr; 2294 2295 /* 2296 * Test for alternating 1's and 0's 2297 */ 2298 *ptr = 0xaaaaaaaaaaaaaaaaLLU; 2299 cpu_mfence(); 2300 if (*ptr != 0xaaaaaaaaaaaaaaaaLLU) 2301 page_bad = TRUE; 2302 /* 2303 * Test for alternating 0's and 1's 2304 */ 2305 *ptr = 0x5555555555555555LLU; 2306 cpu_mfence(); 2307 if (*ptr != 0x5555555555555555LLU) 2308 page_bad = TRUE; 2309 /* 2310 * Test for all 1's 2311 */ 2312 *ptr = 0xffffffffffffffffLLU; 2313 cpu_mfence(); 2314 if (*ptr != 0xffffffffffffffffLLU) 2315 page_bad = TRUE; 2316 /* 2317 * Test for all 0's 2318 */ 2319 *ptr = 0x0; 2320 cpu_mfence(); 2321 if (*ptr != 0x0) 2322 page_bad = TRUE; 2323 2324 /* 2325 * Restore original value. 2326 */ 2327 *ptr = tmp; 2328 2329 /* 2330 * Adjust array of valid/good pages. 2331 */ 2332 if (page_bad == TRUE) { 2333 incr = PAGE_SIZE; 2334 continue; 2335 } 2336 2337 /* 2338 * Collapse page address into phys_avail[]. Do a 2339 * continuation of the current phys_avail[] index 2340 * when possible. 2341 */ 2342 if (phys_avail[pa_indx].phys_end == pa) { 2343 /* 2344 * Continuation 2345 */ 2346 phys_avail[pa_indx].phys_end += incr; 2347 } else if (phys_avail[pa_indx].phys_beg == 2348 phys_avail[pa_indx].phys_end) { 2349 /* 2350 * Current phys_avail is completely empty, 2351 * reuse the index. 2352 */ 2353 phys_avail[pa_indx].phys_beg = pa; 2354 phys_avail[pa_indx].phys_end = pa + incr; 2355 } else { 2356 /* 2357 * Allocate next phys_avail index. 2358 */ 2359 ++pa_indx; 2360 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 2361 kprintf( 2362 "Too many holes in the physical address space, giving up\n"); 2363 --pa_indx; 2364 full = TRUE; 2365 goto do_dump_avail; 2366 } 2367 phys_avail[pa_indx].phys_beg = pa; 2368 phys_avail[pa_indx].phys_end = pa + incr; 2369 } 2370 physmem += incr / PAGE_SIZE; 2371 2372 /* 2373 * pa available for dumping 2374 */ 2375 do_dump_avail: 2376 if (dump_avail[da_indx].phys_end == pa) { 2377 dump_avail[da_indx].phys_end += incr; 2378 } else { 2379 ++da_indx; 2380 if (da_indx == DUMP_AVAIL_ARRAY_END) { 2381 --da_indx; 2382 goto do_next; 2383 } 2384 dump_avail[da_indx].phys_beg = pa; 2385 dump_avail[da_indx].phys_end = pa + incr; 2386 } 2387 do_next: 2388 if (full) 2389 break; 2390 } 2391 } 2392 *pte = 0; 2393 cpu_invltlb(); 2394 cpu_mfence(); 2395 2396 /* 2397 * The last chunk must contain at least one page plus the message 2398 * buffer to avoid complicating other code (message buffer address 2399 * calculation, etc.). 2400 */ 2401 msgbuf_size = (MSGBUF_SIZE + PHYSMAP_ALIGN_MASK) & ~PHYSMAP_ALIGN_MASK; 2402 2403 while (phys_avail[pa_indx].phys_beg + PHYSMAP_ALIGN + msgbuf_size >= 2404 phys_avail[pa_indx].phys_end) { 2405 physmem -= atop(phys_avail[pa_indx].phys_end - 2406 phys_avail[pa_indx].phys_beg); 2407 phys_avail[pa_indx].phys_beg = 0; 2408 phys_avail[pa_indx].phys_end = 0; 2409 --pa_indx; 2410 } 2411 2412 Maxmem = atop(phys_avail[pa_indx].phys_end); 2413 2414 /* Trim off space for the message buffer. */ 2415 phys_avail[pa_indx].phys_end -= msgbuf_size; 2416 2417 avail_end = phys_avail[pa_indx].phys_end; 2418 2419 /* Map the message buffer. */ 2420 for (off = 0; off < msgbuf_size; off += PAGE_SIZE) { 2421 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off); 2422 } 2423 2424 /* Try to get EFI framebuffer working as early as possible */ 2425 { 2426 /* 2427 * HACK: Setting machdep.hack_efifb_probe_early=1 works around 2428 * an issue that occurs on some recent systems where there is 2429 * no system console when booting via UEFI. Bug #3167. 
2430 * 2431 * NOTE: This is not intended to be a permant fix. 2432 */ 2433 2434 int hack_efifb_probe_early = 0; 2435 TUNABLE_INT_FETCH("machdep.hack_efifb_probe_early", &hack_efifb_probe_early); 2436 2437 if (hack_efifb_probe_early) 2438 probe_efi_fb(1); 2439 else if (have_efi_framebuffer) 2440 efi_fb_init_vaddr(1); 2441 } 2442 } 2443 2444 struct machintr_abi MachIntrABI; 2445 2446 /* 2447 * IDT VECTORS: 2448 * 0 Divide by zero 2449 * 1 Debug 2450 * 2 NMI 2451 * 3 BreakPoint 2452 * 4 OverFlow 2453 * 5 Bound-Range 2454 * 6 Invalid OpCode 2455 * 7 Device Not Available (x87) 2456 * 8 Double-Fault 2457 * 9 Coprocessor Segment overrun (unsupported, reserved) 2458 * 10 Invalid-TSS 2459 * 11 Segment not present 2460 * 12 Stack 2461 * 13 General Protection 2462 * 14 Page Fault 2463 * 15 Reserved 2464 * 16 x87 FP Exception pending 2465 * 17 Alignment Check 2466 * 18 Machine Check 2467 * 19 SIMD floating point 2468 * 20-31 reserved 2469 * 32-255 INTn/external sources 2470 */ 2471 u_int64_t 2472 hammer_time(u_int64_t modulep, u_int64_t physfree) 2473 { 2474 caddr_t kmdp; 2475 int gsel_tss, x, cpu; 2476 #if 0 /* JG */ 2477 int metadata_missing, off; 2478 #endif 2479 struct mdglobaldata *gd; 2480 struct privatespace *ps; 2481 u_int64_t msr; 2482 2483 /* 2484 * Prevent lowering of the ipl if we call tsleep() early. 2485 */ 2486 gd = &CPU_prvspace[0]->mdglobaldata; 2487 ps = (struct privatespace *)gd; 2488 bzero(gd, sizeof(*gd)); 2489 bzero(&ps->common_tss, sizeof(ps->common_tss)); 2490 2491 /* 2492 * Note: on both UP and SMP curthread must be set non-NULL 2493 * early in the boot sequence because the system assumes 2494 * that 'curthread' is never NULL. 2495 */ 2496 2497 gd->mi.gd_curthread = &thread0; 2498 thread0.td_gd = &gd->mi; 2499 2500 atdevbase = ISA_HOLE_START + PTOV_OFFSET; 2501 2502 #if 0 /* JG */ 2503 metadata_missing = 0; 2504 if (bootinfo.bi_modulep) { 2505 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 2506 preload_bootstrap_relocate(KERNBASE); 2507 } else { 2508 metadata_missing = 1; 2509 } 2510 if (bootinfo.bi_envp) 2511 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 2512 #endif 2513 2514 preload_metadata = (caddr_t)(uintptr_t)(modulep + PTOV_OFFSET); 2515 preload_bootstrap_relocate(PTOV_OFFSET); 2516 kmdp = preload_search_by_type("elf kernel"); 2517 if (kmdp == NULL) 2518 kmdp = preload_search_by_type("elf64 kernel"); 2519 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); 2520 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + PTOV_OFFSET; 2521 #ifdef DDB 2522 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); 2523 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); 2524 #endif 2525 efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t); 2526 2527 if (boothowto & RB_VERBOSE) 2528 bootverbose++; 2529 2530 /* 2531 * Default MachIntrABI to ICU 2532 */ 2533 MachIntrABI = MachIntrABI_ICU; 2534 2535 /* 2536 * start with one cpu. Note: with one cpu, ncpus_fit_mask remain 0. 
2537 */ 2538 ncpus = 1; 2539 ncpus_fit = 1; 2540 /* Init basic tunables, hz etc */ 2541 init_param1(); 2542 2543 /* 2544 * make gdt memory segments 2545 */ 2546 gdt_segs[GPROC0_SEL].ssd_base = 2547 (uintptr_t) &CPU_prvspace[0]->common_tss; 2548 2549 gd->mi.gd_prvspace = CPU_prvspace[0]; 2550 2551 for (x = 0; x < NGDT; x++) { 2552 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1)) 2553 ssdtosd(&gdt_segs[x], &gdt[x]); 2554 } 2555 ssdtosyssd(&gdt_segs[GPROC0_SEL], 2556 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); 2557 2558 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 2559 r_gdt.rd_base = (long) gdt; 2560 lgdt(&r_gdt); 2561 2562 wrmsr(MSR_FSBASE, 0); /* User value */ 2563 wrmsr(MSR_GSBASE, (u_int64_t)&gd->mi); 2564 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */ 2565 2566 mi_gdinit(&gd->mi, 0); 2567 cpu_gdinit(gd, 0); 2568 proc0paddr = proc0paddr_buff; 2569 mi_proc0init(&gd->mi, proc0paddr); 2570 safepri = TDPRI_MAX; 2571 2572 /* spinlocks and the BGL */ 2573 init_locks(); 2574 2575 /* exceptions */ 2576 for (x = 0; x < NIDT; x++) 2577 setidt_global(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0); 2578 setidt_global(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0); 2579 setidt_global(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 2); 2580 setidt_global(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 1); 2581 setidt_global(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0); 2582 setidt_global(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0); 2583 setidt_global(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0); 2584 setidt_global(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0); 2585 setidt_global(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0); 2586 setidt_global(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1); 2587 setidt_global(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0); 2588 setidt_global(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0); 2589 setidt_global(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0); 2590 setidt_global(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0); 2591 setidt_global(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0); 2592 setidt_global(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0); 2593 setidt_global(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0); 2594 setidt_global(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0); 2595 setidt_global(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0); 2596 setidt_global(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0); 2597 2598 for (cpu = 0; cpu < MAXCPU; ++cpu) { 2599 r_idt_arr[cpu].rd_limit = sizeof(idt_arr[cpu]) - 1; 2600 r_idt_arr[cpu].rd_base = (long) &idt_arr[cpu][0]; 2601 } 2602 2603 lidt(&r_idt_arr[0]); 2604 2605 /* 2606 * Initialize the console before we print anything out. 
2607 */ 2608 cninit(); 2609 2610 #if 0 /* JG */ 2611 if (metadata_missing) 2612 kprintf("WARNING: loader(8) metadata is missing!\n"); 2613 #endif 2614 2615 #if NISA >0 2616 elcr_probe(); 2617 isa_defaultirq(); 2618 #endif 2619 rand_initialize(); 2620 2621 /* 2622 * Initialize IRQ mapping 2623 * 2624 * NOTE: 2625 * SHOULD be after elcr_probe() 2626 */ 2627 MachIntrABI_ICU.initmap(); 2628 MachIntrABI_IOAPIC.initmap(); 2629 2630 #ifdef DDB 2631 kdb_init(); 2632 if (boothowto & RB_KDB) 2633 Debugger("Boot flags requested debugger"); 2634 #endif 2635 2636 identify_cpu(); /* Final stage of CPU initialization */ 2637 initializecpu(0); /* Initialize CPU registers */ 2638 2639 /* 2640 * On modern Intel cpus, haswell or later, cpu_idle_hlt=1 is better 2641 * because the cpu does significant power management in MWAIT 2642 * (also suggested is to set sysctl machdep.mwait.CX.idle=AUTODEEP). 2643 * 2644 * On many AMD cpus cpu_idle_hlt=3 is better, because the cpu does 2645 * significant power management only when using ACPI halt mode. 2646 * (However, on Ryzen, mode 4 (HLT) also does power management). 2647 * 2648 * On older AMD or Intel cpus, cpu_idle_hlt=2 is better because ACPI 2649 * is needed to reduce power consumption, but wakeup times are often 2650 * too long. 2651 */ 2652 if (cpu_vendor_id == CPU_VENDOR_INTEL && 2653 CPUID_TO_MODEL(cpu_id) >= 0x3C) { /* Haswell or later */ 2654 cpu_idle_hlt = 1; 2655 } 2656 if (cpu_vendor_id == CPU_VENDOR_AMD) { 2657 if (CPUID_TO_FAMILY(cpu_id) >= 0x17) { 2658 /* Ryzen or later */ 2659 cpu_idle_hlt = 3; 2660 } else if (CPUID_TO_FAMILY(cpu_id) >= 0x14) { 2661 /* Bobcat or later */ 2662 cpu_idle_hlt = 3; 2663 } 2664 } 2665 2666 TUNABLE_INT_FETCH("hw.apic_io_enable", &ioapic_enable); /* for compat */ 2667 TUNABLE_INT_FETCH("hw.ioapic_enable", &ioapic_enable); 2668 TUNABLE_INT_FETCH("hw.lapic_enable", &lapic_enable); 2669 TUNABLE_INT_FETCH("machdep.cpu_idle_hlt", &cpu_idle_hlt); 2670 2671 /* 2672 * Some of the virtual machines do not work w/ I/O APIC 2673 * enabled. If the user does not explicitly enable or 2674 * disable the I/O APIC (ioapic_enable < 0), then we 2675 * disable I/O APIC on all virtual machines. 2676 * 2677 * NOTE: 2678 * This must be done after identify_cpu(), which sets 2679 * 'cpu_feature2' 2680 */ 2681 if (ioapic_enable < 0) { 2682 if (cpu_feature2 & CPUID2_VMM) 2683 ioapic_enable = 0; 2684 else 2685 ioapic_enable = 1; 2686 } 2687 2688 /* 2689 * TSS entry point for interrupts, traps, and exceptions 2690 * (sans NMI). This will always go to near the top of the pcpu 2691 * trampoline area. Hardware-pushed data will be copied into 2692 * the trap-frame on entry, and (if necessary) returned to the 2693 * trampoline on exit. 2694 * 2695 * We store some pcb data for the trampoline code above the 2696 * stack the cpu hw pushes into, and arrange things so the 2697 * address of tr_pcb_rsp is the same as the desired top of 2698 * stack. 
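 *
 * Roughly:
 *
 *	... other tr_pcb_* fields ...	(data for the trampoline code)
 *	tr_pcb_rsp			<- tss_rsp0 points here
 *	hardware-pushed frame		(ss, rsp, rflags, cs, rip, ...)
 *					growing down from tss_rsp0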
2699 */ 2700 ps->common_tss.tss_rsp0 = (register_t)&ps->trampoline.tr_pcb_rsp; 2701 ps->trampoline.tr_pcb_rsp = ps->common_tss.tss_rsp0; 2702 ps->trampoline.tr_pcb_gs_kernel = (register_t)gd; 2703 ps->trampoline.tr_pcb_cr3 = KPML4phys; /* adj to user cr3 live */ 2704 ps->dbltramp.tr_pcb_gs_kernel = (register_t)gd; 2705 ps->dbltramp.tr_pcb_cr3 = KPML4phys; 2706 ps->dbgtramp.tr_pcb_gs_kernel = (register_t)gd; 2707 ps->dbgtramp.tr_pcb_cr3 = KPML4phys; 2708 2709 /* double fault stack */ 2710 ps->common_tss.tss_ist1 = (register_t)&ps->dbltramp.tr_pcb_rsp; 2711 /* #DB debugger needs its own stack */ 2712 ps->common_tss.tss_ist2 = (register_t)&ps->dbgtramp.tr_pcb_rsp; 2713 2714 /* Set the IO permission bitmap (empty due to tss seg limit) */ 2715 ps->common_tss.tss_iobase = sizeof(struct x86_64tss); 2716 2717 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 2718 gd->gd_tss_gdt = &gdt[GPROC0_SEL]; 2719 gd->gd_common_tssd = *gd->gd_tss_gdt; 2720 ltr(gsel_tss); 2721 2722 /* Set up the fast syscall stuff */ 2723 msr = rdmsr(MSR_EFER) | EFER_SCE; 2724 wrmsr(MSR_EFER, msr); 2725 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 2726 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 2727 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 2728 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 2729 wrmsr(MSR_STAR, msr); 2730 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL|PSL_AC); 2731 2732 getmemsize(kmdp, physfree); 2733 init_param2(physmem); 2734 2735 /* now running on new page tables, configured,and u/iom is accessible */ 2736 2737 /* Map the message buffer. */ 2738 #if 0 /* JG */ 2739 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 2740 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off); 2741 #endif 2742 2743 msgbufinit(msgbufp, MSGBUF_SIZE); 2744 2745 2746 /* transfer to user mode */ 2747 2748 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); 2749 _udatasel = GSEL(GUDATA_SEL, SEL_UPL); 2750 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL); 2751 2752 load_ds(_udatasel); 2753 load_es(_udatasel); 2754 load_fs(_udatasel); 2755 2756 /* setup proc 0's pcb */ 2757 thread0.td_pcb->pcb_flags = 0; 2758 thread0.td_pcb->pcb_cr3 = KPML4phys; 2759 thread0.td_pcb->pcb_cr3_iso = 0; 2760 thread0.td_pcb->pcb_ext = NULL; 2761 lwp0.lwp_md.md_regs = &proc0_tf; /* XXX needed? */ 2762 2763 /* Location of kernel stack for locore */ 2764 return ((u_int64_t)thread0.td_pcb); 2765 } 2766 2767 /* 2768 * Initialize machine-dependant portions of the global data structure. 2769 * Note that the global data area and cpu0's idlestack in the private 2770 * data space were allocated in locore. 2771 * 2772 * Note: the idlethread's cpl is 0 2773 * 2774 * WARNING! Called from early boot, 'mycpu' may not work yet. 2775 */ 2776 void 2777 cpu_gdinit(struct mdglobaldata *gd, int cpu) 2778 { 2779 if (cpu) 2780 gd->mi.gd_curthread = &gd->mi.gd_idlethread; 2781 2782 lwkt_init_thread(&gd->mi.gd_idlethread, 2783 gd->mi.gd_prvspace->idlestack, 2784 sizeof(gd->mi.gd_prvspace->idlestack), 2785 0, &gd->mi); 2786 lwkt_set_comm(&gd->mi.gd_idlethread, "idle_%d", cpu); 2787 gd->mi.gd_idlethread.td_switch = cpu_lwkt_switch; 2788 gd->mi.gd_idlethread.td_sp -= sizeof(void *); 2789 *(void **)gd->mi.gd_idlethread.td_sp = cpu_idle_restore; 2790 } 2791 2792 /* 2793 * We only have to check for DMAP bounds, the globaldata space is 2794 * actually part of the kernel_map so we don't have to waste time 2795 * checking CPU_prvspace[*]. 
2796 */ 2797 int 2798 is_globaldata_space(vm_offset_t saddr, vm_offset_t eaddr) 2799 { 2800 #if 0 2801 if (saddr >= (vm_offset_t)&CPU_prvspace[0] && 2802 eaddr <= (vm_offset_t)&CPU_prvspace[MAXCPU]) { 2803 return (TRUE); 2804 } 2805 #endif 2806 if (saddr >= DMAP_MIN_ADDRESS && eaddr <= DMAP_MAX_ADDRESS) 2807 return (TRUE); 2808 return (FALSE); 2809 } 2810 2811 struct globaldata * 2812 globaldata_find(int cpu) 2813 { 2814 KKASSERT(cpu >= 0 && cpu < ncpus); 2815 return(&CPU_prvspace[cpu]->mdglobaldata.mi); 2816 } 2817 2818 /* 2819 * This path should be safe from the SYSRET issue because only stopped threads 2820 * can have their %rip adjusted this way (and all heavy weight thread switches 2821 * clear QUICKREF and thus do not use SYSRET). However, the code path is 2822 * convoluted so add a safety by forcing %rip to be cannonical. 2823 */ 2824 int 2825 ptrace_set_pc(struct lwp *lp, unsigned long addr) 2826 { 2827 if (addr & 0x0000800000000000LLU) 2828 lp->lwp_md.md_regs->tf_rip = addr | 0xFFFF000000000000LLU; 2829 else 2830 lp->lwp_md.md_regs->tf_rip = addr & 0x0000FFFFFFFFFFFFLLU; 2831 return (0); 2832 } 2833 2834 int 2835 ptrace_single_step(struct lwp *lp) 2836 { 2837 lp->lwp_md.md_regs->tf_rflags |= PSL_T; 2838 return (0); 2839 } 2840 2841 int 2842 fill_regs(struct lwp *lp, struct reg *regs) 2843 { 2844 struct trapframe *tp; 2845 2846 if ((tp = lp->lwp_md.md_regs) == NULL) 2847 return EINVAL; 2848 bcopy(&tp->tf_rdi, ®s->r_rdi, sizeof(*regs)); 2849 return (0); 2850 } 2851 2852 int 2853 set_regs(struct lwp *lp, struct reg *regs) 2854 { 2855 struct trapframe *tp; 2856 2857 tp = lp->lwp_md.md_regs; 2858 if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) || 2859 !CS_SECURE(regs->r_cs)) 2860 return (EINVAL); 2861 bcopy(®s->r_rdi, &tp->tf_rdi, sizeof(*regs)); 2862 clear_quickret(); 2863 return (0); 2864 } 2865 2866 static void 2867 fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87) 2868 { 2869 struct env87 *penv_87 = &sv_87->sv_env; 2870 struct envxmm *penv_xmm = &sv_xmm->sv_env; 2871 int i; 2872 2873 /* FPU control/status */ 2874 penv_87->en_cw = penv_xmm->en_cw; 2875 penv_87->en_sw = penv_xmm->en_sw; 2876 penv_87->en_tw = penv_xmm->en_tw; 2877 penv_87->en_fip = penv_xmm->en_fip; 2878 penv_87->en_fcs = penv_xmm->en_fcs; 2879 penv_87->en_opcode = penv_xmm->en_opcode; 2880 penv_87->en_foo = penv_xmm->en_foo; 2881 penv_87->en_fos = penv_xmm->en_fos; 2882 2883 /* FPU registers */ 2884 for (i = 0; i < 8; ++i) 2885 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc; 2886 } 2887 2888 static void 2889 set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm) 2890 { 2891 struct env87 *penv_87 = &sv_87->sv_env; 2892 struct envxmm *penv_xmm = &sv_xmm->sv_env; 2893 int i; 2894 2895 /* FPU control/status */ 2896 penv_xmm->en_cw = penv_87->en_cw; 2897 penv_xmm->en_sw = penv_87->en_sw; 2898 penv_xmm->en_tw = penv_87->en_tw; 2899 penv_xmm->en_fip = penv_87->en_fip; 2900 penv_xmm->en_fcs = penv_87->en_fcs; 2901 penv_xmm->en_opcode = penv_87->en_opcode; 2902 penv_xmm->en_foo = penv_87->en_foo; 2903 penv_xmm->en_fos = penv_87->en_fos; 2904 2905 /* FPU registers */ 2906 for (i = 0; i < 8; ++i) 2907 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i]; 2908 } 2909 2910 int 2911 fill_fpregs(struct lwp *lp, struct fpreg *fpregs) 2912 { 2913 if (lp->lwp_thread == NULL || lp->lwp_thread->td_pcb == NULL) 2914 return EINVAL; 2915 if (cpu_fxsr) { 2916 fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm, 2917 (struct save87 *)fpregs); 2918 return (0); 2919 } 2920 bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs, sizeof 
*fpregs); 2921 return (0); 2922 } 2923 2924 int 2925 set_fpregs(struct lwp *lp, struct fpreg *fpregs) 2926 { 2927 if (cpu_fxsr) { 2928 set_fpregs_xmm((struct save87 *)fpregs, 2929 &lp->lwp_thread->td_pcb->pcb_save.sv_xmm); 2930 return (0); 2931 } 2932 bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87, sizeof *fpregs); 2933 return (0); 2934 } 2935 2936 int 2937 fill_dbregs(struct lwp *lp, struct dbreg *dbregs) 2938 { 2939 struct pcb *pcb; 2940 2941 if (lp == NULL) { 2942 dbregs->dr[0] = rdr0(); 2943 dbregs->dr[1] = rdr1(); 2944 dbregs->dr[2] = rdr2(); 2945 dbregs->dr[3] = rdr3(); 2946 dbregs->dr[4] = rdr4(); 2947 dbregs->dr[5] = rdr5(); 2948 dbregs->dr[6] = rdr6(); 2949 dbregs->dr[7] = rdr7(); 2950 return (0); 2951 } 2952 if (lp->lwp_thread == NULL || (pcb = lp->lwp_thread->td_pcb) == NULL) 2953 return EINVAL; 2954 dbregs->dr[0] = pcb->pcb_dr0; 2955 dbregs->dr[1] = pcb->pcb_dr1; 2956 dbregs->dr[2] = pcb->pcb_dr2; 2957 dbregs->dr[3] = pcb->pcb_dr3; 2958 dbregs->dr[4] = 0; 2959 dbregs->dr[5] = 0; 2960 dbregs->dr[6] = pcb->pcb_dr6; 2961 dbregs->dr[7] = pcb->pcb_dr7; 2962 return (0); 2963 } 2964 2965 int 2966 set_dbregs(struct lwp *lp, struct dbreg *dbregs) 2967 { 2968 if (lp == NULL) { 2969 load_dr0(dbregs->dr[0]); 2970 load_dr1(dbregs->dr[1]); 2971 load_dr2(dbregs->dr[2]); 2972 load_dr3(dbregs->dr[3]); 2973 load_dr4(dbregs->dr[4]); 2974 load_dr5(dbregs->dr[5]); 2975 load_dr6(dbregs->dr[6]); 2976 load_dr7(dbregs->dr[7]); 2977 } else { 2978 struct pcb *pcb; 2979 struct ucred *ucred; 2980 int i; 2981 uint64_t mask1, mask2; 2982 2983 /* 2984 * Don't let an illegal value for dr7 get set. Specifically, 2985 * check for undefined settings. Setting these bit patterns 2986 * result in undefined behaviour and can lead to an unexpected 2987 * TRCTRAP. 2988 */ 2989 /* JG this loop looks unreadable */ 2990 /* Check 4 2-bit fields for invalid patterns. 2991 * These fields are R/Wi, for i = 0..3 2992 */ 2993 /* Is 10 in LENi allowed when running in compatibility mode? */ 2994 /* Pattern 10 in R/Wi might be used to indicate 2995 * breakpoint on I/O. Further analysis should be 2996 * carried to decide if it is safe and useful to 2997 * provide access to that capability 2998 */ 2999 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4; 3000 i++, mask1 <<= 4, mask2 <<= 4) 3001 if ((dbregs->dr[7] & mask1) == mask2) 3002 return (EINVAL); 3003 3004 pcb = lp->lwp_thread->td_pcb; 3005 ucred = lp->lwp_proc->p_ucred; 3006 3007 /* 3008 * Don't let a process set a breakpoint that is not within the 3009 * process's address space. If a process could do this, it 3010 * could halt the system by setting a breakpoint in the kernel 3011 * (if ddb was enabled). Thus, we need to check to make sure 3012 * that no breakpoints are being enabled for addresses outside 3013 * process's address space, unless, perhaps, we were called by 3014 * uid 0. 3015 * 3016 * XXX - what about when the watched area of the user's 3017 * address space is written into from within the kernel 3018 * ... wouldn't that still cause a breakpoint to be generated 3019 * from within kernel mode? 
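 *
 * In the checks below each (dr[7] & (0x3 << (2 * n))) test covers both
 * the local and global enable bit for breakpoint register n, so an
 * address is only validated when that breakpoint is actually enabled.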
3020 */ 3021 3022 if (priv_check_cred(ucred, PRIV_ROOT, 0) != 0) { 3023 if (dbregs->dr[7] & 0x3) { 3024 /* dr0 is enabled */ 3025 if (dbregs->dr[0] >= VM_MAX_USER_ADDRESS) 3026 return (EINVAL); 3027 } 3028 3029 if (dbregs->dr[7] & (0x3<<2)) { 3030 /* dr1 is enabled */ 3031 if (dbregs->dr[1] >= VM_MAX_USER_ADDRESS) 3032 return (EINVAL); 3033 } 3034 3035 if (dbregs->dr[7] & (0x3<<4)) { 3036 /* dr2 is enabled */ 3037 if (dbregs->dr[2] >= VM_MAX_USER_ADDRESS) 3038 return (EINVAL); 3039 } 3040 3041 if (dbregs->dr[7] & (0x3<<6)) { 3042 /* dr3 is enabled */ 3043 if (dbregs->dr[3] >= VM_MAX_USER_ADDRESS) 3044 return (EINVAL); 3045 } 3046 } 3047 3048 pcb->pcb_dr0 = dbregs->dr[0]; 3049 pcb->pcb_dr1 = dbregs->dr[1]; 3050 pcb->pcb_dr2 = dbregs->dr[2]; 3051 pcb->pcb_dr3 = dbregs->dr[3]; 3052 pcb->pcb_dr6 = dbregs->dr[6]; 3053 pcb->pcb_dr7 = dbregs->dr[7]; 3054 3055 pcb->pcb_flags |= PCB_DBREGS; 3056 } 3057 3058 return (0); 3059 } 3060 3061 /* 3062 * Return > 0 if a hardware breakpoint has been hit, and the 3063 * breakpoint was in user space. Return 0, otherwise. 3064 */ 3065 int 3066 user_dbreg_trap(void) 3067 { 3068 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */ 3069 u_int64_t bp; /* breakpoint bits extracted from dr6 */ 3070 int nbp; /* number of breakpoints that triggered */ 3071 caddr_t addr[4]; /* breakpoint addresses */ 3072 int i; 3073 3074 dr7 = rdr7(); 3075 if ((dr7 & 0xff) == 0) { 3076 /* 3077 * all GE and LE bits in the dr7 register are zero, 3078 * thus the trap couldn't have been caused by the 3079 * hardware debug registers 3080 */ 3081 return 0; 3082 } 3083 3084 nbp = 0; 3085 dr6 = rdr6(); 3086 bp = dr6 & 0xf; 3087 3088 if (bp == 0) { 3089 /* 3090 * None of the breakpoint bits are set meaning this 3091 * trap was not caused by any of the debug registers 3092 */ 3093 return 0; 3094 } 3095 3096 /* 3097 * at least one of the breakpoints were hit, check to see 3098 * which ones and if any of them are user space addresses 3099 */ 3100 3101 if (bp & 0x01) { 3102 addr[nbp++] = (caddr_t)rdr0(); 3103 } 3104 if (bp & 0x02) { 3105 addr[nbp++] = (caddr_t)rdr1(); 3106 } 3107 if (bp & 0x04) { 3108 addr[nbp++] = (caddr_t)rdr2(); 3109 } 3110 if (bp & 0x08) { 3111 addr[nbp++] = (caddr_t)rdr3(); 3112 } 3113 3114 for (i = 0; i < nbp; i++) { 3115 if (addr[i] < (caddr_t)VM_MAX_USER_ADDRESS) { 3116 /* 3117 * addr[i] is in user space 3118 */ 3119 return nbp; 3120 } 3121 } 3122 3123 /* 3124 * None of the breakpoints are in user space. 3125 */ 3126 return 0; 3127 } 3128 3129 3130 #ifndef DDB 3131 void 3132 Debugger(const char *msg) 3133 { 3134 kprintf("Debugger(\"%s\") called.\n", msg); 3135 } 3136 #endif /* no DDB */ 3137 3138 #ifdef DDB 3139 3140 /* 3141 * Provide inb() and outb() as functions. They are normally only 3142 * available as macros calling inlined functions, thus cannot be 3143 * called inside DDB. 3144 * 3145 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 3146 */ 3147 3148 #undef inb 3149 #undef outb 3150 3151 /* silence compiler warnings */ 3152 u_char inb(u_int); 3153 void outb(u_int, u_char); 3154 3155 u_char 3156 inb(u_int port) 3157 { 3158 u_char data; 3159 /* 3160 * We use %%dx and not %1 here because i/o is done at %dx and not at 3161 * %edx, while gcc generates inferior code (movw instead of movl) 3162 * if we tell it to load (u_short) port. 
3163 */ 3164 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 3165 return (data); 3166 } 3167 3168 void 3169 outb(u_int port, u_char data) 3170 { 3171 u_char al; 3172 /* 3173 * Use an unnecessary assignment to help gcc's register allocator. 3174 * This make a large difference for gcc-1.40 and a tiny difference 3175 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 3176 * best results. gcc-2.6.0 can't handle this. 3177 */ 3178 al = data; 3179 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 3180 } 3181 3182 #endif /* DDB */ 3183 3184 3185 3186 /* 3187 * initialize all the SMP locks 3188 */ 3189 3190 /* critical region when masking or unmasking interupts */ 3191 struct spinlock_deprecated imen_spinlock; 3192 3193 /* locks com (tty) data/hardware accesses: a FASTINTR() */ 3194 struct spinlock_deprecated com_spinlock; 3195 3196 /* lock regions around the clock hardware */ 3197 struct spinlock_deprecated clock_spinlock; 3198 3199 static void 3200 init_locks(void) 3201 { 3202 /* 3203 * Get the initial mplock with a count of 1 for the BSP. 3204 * This uses a LOGICAL cpu ID, ie BSP == 0. 3205 */ 3206 cpu_get_initial_mplock(); 3207 /* DEPRECATED */ 3208 spin_init_deprecated(&imen_spinlock); 3209 spin_init_deprecated(&com_spinlock); 3210 spin_init_deprecated(&clock_spinlock); 3211 3212 /* our token pool needs to work early */ 3213 lwkt_token_pool_init(); 3214 } 3215 3216 boolean_t 3217 cpu_mwait_hint_valid(uint32_t hint) 3218 { 3219 int cx_idx, sub; 3220 3221 cx_idx = MWAIT_EAX_TO_CX(hint); 3222 if (cx_idx >= CPU_MWAIT_CX_MAX) 3223 return FALSE; 3224 3225 sub = MWAIT_EAX_TO_CX_SUB(hint); 3226 if (sub >= cpu_mwait_cx_info[cx_idx].subcnt) 3227 return FALSE; 3228 3229 return TRUE; 3230 } 3231 3232 void 3233 cpu_mwait_cx_no_bmsts(void) 3234 { 3235 atomic_clear_int(&cpu_mwait_c3_preamble, CPU_MWAIT_C3_PREAMBLE_BM_STS); 3236 } 3237 3238 void 3239 cpu_mwait_cx_no_bmarb(void) 3240 { 3241 atomic_clear_int(&cpu_mwait_c3_preamble, CPU_MWAIT_C3_PREAMBLE_BM_ARB); 3242 } 3243 3244 static int 3245 cpu_mwait_cx_hint2name(int hint, char *name, int namelen, boolean_t allow_auto) 3246 { 3247 int old_cx_idx, sub = 0; 3248 3249 if (hint >= 0) { 3250 old_cx_idx = MWAIT_EAX_TO_CX(hint); 3251 sub = MWAIT_EAX_TO_CX_SUB(hint); 3252 } else if (hint == CPU_MWAIT_HINT_AUTO) { 3253 old_cx_idx = allow_auto ? CPU_MWAIT_C2 : CPU_MWAIT_CX_MAX; 3254 } else if (hint == CPU_MWAIT_HINT_AUTODEEP) { 3255 old_cx_idx = allow_auto ? 
CPU_MWAIT_C3 : CPU_MWAIT_CX_MAX; 3256 } else { 3257 old_cx_idx = CPU_MWAIT_CX_MAX; 3258 } 3259 3260 if (!CPU_MWAIT_HAS_CX) 3261 strlcpy(name, "NONE", namelen); 3262 else if (allow_auto && hint == CPU_MWAIT_HINT_AUTO) 3263 strlcpy(name, "AUTO", namelen); 3264 else if (allow_auto && hint == CPU_MWAIT_HINT_AUTODEEP) 3265 strlcpy(name, "AUTODEEP", namelen); 3266 else if (old_cx_idx >= CPU_MWAIT_CX_MAX || 3267 sub >= cpu_mwait_cx_info[old_cx_idx].subcnt) 3268 strlcpy(name, "INVALID", namelen); 3269 else 3270 ksnprintf(name, namelen, "C%d/%d", old_cx_idx, sub); 3271 3272 return old_cx_idx; 3273 } 3274 3275 static int 3276 cpu_mwait_cx_name2hint(char *name, int *hint0, boolean_t allow_auto) 3277 { 3278 int cx_idx, sub, hint; 3279 char *ptr, *start; 3280 3281 if (allow_auto && strcmp(name, "AUTO") == 0) { 3282 hint = CPU_MWAIT_HINT_AUTO; 3283 cx_idx = CPU_MWAIT_C2; 3284 goto done; 3285 } 3286 if (allow_auto && strcmp(name, "AUTODEEP") == 0) { 3287 hint = CPU_MWAIT_HINT_AUTODEEP; 3288 cx_idx = CPU_MWAIT_C3; 3289 goto done; 3290 } 3291 3292 if (strlen(name) < 4 || toupper(name[0]) != 'C') 3293 return -1; 3294 start = &name[1]; 3295 ptr = NULL; 3296 3297 cx_idx = strtol(start, &ptr, 10); 3298 if (ptr == start || *ptr != '/') 3299 return -1; 3300 if (cx_idx < 0 || cx_idx >= CPU_MWAIT_CX_MAX) 3301 return -1; 3302 3303 start = ptr + 1; 3304 ptr = NULL; 3305 3306 sub = strtol(start, &ptr, 10); 3307 if (*ptr != '\0') 3308 return -1; 3309 if (sub < 0 || sub >= cpu_mwait_cx_info[cx_idx].subcnt) 3310 return -1; 3311 3312 hint = MWAIT_EAX_HINT(cx_idx, sub); 3313 done: 3314 *hint0 = hint; 3315 return cx_idx; 3316 } 3317 3318 static int 3319 cpu_mwait_cx_transit(int old_cx_idx, int cx_idx) 3320 { 3321 if (cx_idx >= CPU_MWAIT_C3 && cpu_mwait_c3_preamble) 3322 return EOPNOTSUPP; 3323 if (old_cx_idx < CPU_MWAIT_C3 && cx_idx >= CPU_MWAIT_C3) { 3324 int error; 3325 3326 error = cputimer_intr_powersave_addreq(); 3327 if (error) 3328 return error; 3329 } else if (old_cx_idx >= CPU_MWAIT_C3 && cx_idx < CPU_MWAIT_C3) { 3330 cputimer_intr_powersave_remreq(); 3331 } 3332 return 0; 3333 } 3334 3335 static int 3336 cpu_mwait_cx_select_sysctl(SYSCTL_HANDLER_ARGS, int *hint0, 3337 boolean_t allow_auto) 3338 { 3339 int error, cx_idx, old_cx_idx, hint; 3340 char name[CPU_MWAIT_CX_NAMELEN]; 3341 3342 hint = *hint0; 3343 old_cx_idx = cpu_mwait_cx_hint2name(hint, name, sizeof(name), 3344 allow_auto); 3345 3346 error = sysctl_handle_string(oidp, name, sizeof(name), req); 3347 if (error != 0 || req->newptr == NULL) 3348 return error; 3349 3350 if (!CPU_MWAIT_HAS_CX) 3351 return EOPNOTSUPP; 3352 3353 cx_idx = cpu_mwait_cx_name2hint(name, &hint, allow_auto); 3354 if (cx_idx < 0) 3355 return EINVAL; 3356 3357 error = cpu_mwait_cx_transit(old_cx_idx, cx_idx); 3358 if (error) 3359 return error; 3360 3361 *hint0 = hint; 3362 return 0; 3363 } 3364 3365 static int 3366 cpu_mwait_cx_setname(struct cpu_idle_stat *stat, const char *cx_name) 3367 { 3368 int error, cx_idx, old_cx_idx, hint; 3369 char name[CPU_MWAIT_CX_NAMELEN]; 3370 3371 KASSERT(CPU_MWAIT_HAS_CX, ("cpu does not support mwait CX extension")); 3372 3373 hint = stat->hint; 3374 old_cx_idx = cpu_mwait_cx_hint2name(hint, name, sizeof(name), TRUE); 3375 3376 strlcpy(name, cx_name, sizeof(name)); 3377 cx_idx = cpu_mwait_cx_name2hint(name, &hint, TRUE); 3378 if (cx_idx < 0) 3379 return EINVAL; 3380 3381 error = cpu_mwait_cx_transit(old_cx_idx, cx_idx); 3382 if (error) 3383 return error; 3384 3385 stat->hint = hint; 3386 return 0; 3387 } 3388 3389 static int 3390 
cpu_mwait_cx_idle_sysctl(SYSCTL_HANDLER_ARGS) 3391 { 3392 int hint = cpu_mwait_halt_global; 3393 int error, cx_idx, cpu; 3394 char name[CPU_MWAIT_CX_NAMELEN], cx_name[CPU_MWAIT_CX_NAMELEN]; 3395 3396 cpu_mwait_cx_hint2name(hint, name, sizeof(name), TRUE); 3397 3398 error = sysctl_handle_string(oidp, name, sizeof(name), req); 3399 if (error != 0 || req->newptr == NULL) 3400 return error; 3401 3402 if (!CPU_MWAIT_HAS_CX) 3403 return EOPNOTSUPP; 3404 3405 /* Save name for later per-cpu CX configuration */ 3406 strlcpy(cx_name, name, sizeof(cx_name)); 3407 3408 cx_idx = cpu_mwait_cx_name2hint(name, &hint, TRUE); 3409 if (cx_idx < 0) 3410 return EINVAL; 3411 3412 /* Change per-cpu CX configuration */ 3413 for (cpu = 0; cpu < ncpus; ++cpu) { 3414 error = cpu_mwait_cx_setname(&cpu_idle_stats[cpu], cx_name); 3415 if (error) 3416 return error; 3417 } 3418 3419 cpu_mwait_halt_global = hint; 3420 return 0; 3421 } 3422 3423 static int 3424 cpu_mwait_cx_pcpu_idle_sysctl(SYSCTL_HANDLER_ARGS) 3425 { 3426 struct cpu_idle_stat *stat = arg1; 3427 int error; 3428 3429 error = cpu_mwait_cx_select_sysctl(oidp, arg1, arg2, req, 3430 &stat->hint, TRUE); 3431 return error; 3432 } 3433 3434 static int 3435 cpu_mwait_cx_spin_sysctl(SYSCTL_HANDLER_ARGS) 3436 { 3437 int error; 3438 3439 error = cpu_mwait_cx_select_sysctl(oidp, arg1, arg2, req, 3440 &cpu_mwait_spin, FALSE); 3441 return error; 3442 } 3443 3444 /* 3445 * This manual debugging code is called unconditionally from Xtimer 3446 * (the per-cpu timer interrupt) whether the current thread is in a 3447 * critical section or not) and can be useful in tracking down lockups. 3448 * 3449 * NOTE: MANUAL DEBUG CODE 3450 */ 3451 #if 0 3452 static int saveticks[SMP_MAXCPU]; 3453 static int savecounts[SMP_MAXCPU]; 3454 #endif 3455 3456 void 3457 pcpu_timer_always(struct intrframe *frame) 3458 { 3459 #if 0 3460 globaldata_t gd = mycpu; 3461 int cpu = gd->gd_cpuid; 3462 char buf[64]; 3463 short *gptr; 3464 int i; 3465 3466 if (cpu <= 20) { 3467 gptr = (short *)0xFFFFFFFF800b8000 + 80 * cpu; 3468 *gptr = ((*gptr + 1) & 0x00FF) | 0x0700; 3469 ++gptr; 3470 3471 ksnprintf(buf, sizeof(buf), " %p %16s %d %16s ", 3472 (void *)frame->if_rip, gd->gd_curthread->td_comm, ticks, 3473 gd->gd_infomsg); 3474 for (i = 0; buf[i]; ++i) { 3475 gptr[i] = 0x0700 | (unsigned char)buf[i]; 3476 } 3477 } 3478 #if 0 3479 if (saveticks[gd->gd_cpuid] != ticks) { 3480 saveticks[gd->gd_cpuid] = ticks; 3481 savecounts[gd->gd_cpuid] = 0; 3482 } 3483 ++savecounts[gd->gd_cpuid]; 3484 if (savecounts[gd->gd_cpuid] > 2000 && panicstr == NULL) { 3485 panic("cpud %d panicing on ticks failure", 3486 gd->gd_cpuid); 3487 } 3488 for (i = 0; i < ncpus; ++i) { 3489 int delta; 3490 if (saveticks[i] && panicstr == NULL) { 3491 delta = saveticks[i] - ticks; 3492 if (delta < -10 || delta > 10) { 3493 panic("cpu %d panicing on cpu %d watchdog", 3494 gd->gd_cpuid, i); 3495 } 3496 } 3497 } 3498 #endif 3499 #endif 3500 } 3501 3502 SET_DECLARE(smap_open, char); 3503 SET_DECLARE(smap_close, char); 3504 3505 static void 3506 cpu_implement_smap(void) 3507 { 3508 char **scan; 3509 3510 for (scan = SET_BEGIN(smap_open); /* nop -> stac */ 3511 scan < SET_LIMIT(smap_open); ++scan) { 3512 (*scan)[0] = 0x0F; 3513 (*scan)[1] = 0x01; 3514 (*scan)[2] = 0xCB; 3515 } 3516 for (scan = SET_BEGIN(smap_close); /* nop -> clac */ 3517 scan < SET_LIMIT(smap_close); ++scan) { 3518 (*scan)[0] = 0x0F; 3519 (*scan)[1] = 0x01; 3520 (*scan)[2] = 0xCA; 3521 } 3522 } 3523