/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>
#include <x86/init.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

#define GiB(v)			(v ## ULL << 30)

#define AP_BOOTPT_SZ		(PAGE_SIZE * 4)

/* Temporary variables for init_secondary() */
static char *doublefault_stack;
static char *mce_stack;
static char *nmi_stack;
static char *dbg_stack;
void *bootpcpu;

extern u_int mptramp_la57;
extern u_int mptramp_nx;

/*
 * Local data and functions.
 */

static int	start_ap(int apic_id, vm_paddr_t boot_address);

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for cache and TLB invalidations. */
	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	mptramp_la57 = la57;
	mptramp_nx = pg_nx != 0;
	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
	mptramp_pagetables = kernel_pmap->pm_cr3;

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct user_segment_descriptor *gdt;
	struct region_descriptor ap_gdt;
	u_int64_t cr0;
	int cpu, gsel_tss, x;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Initialize the PCPU area. */
	pc = bootpcpu;
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &pc->pc_common_tss;
	pc->pc_rsp0 = 0;
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
	gdt = pc->pc_gdt;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
	pc->pc_fs32p = &gdt[GUFS32_SEL];
	pc->pc_gs32p = &gdt[GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;

	pc->pc_smp_tlb_gen = 1;

	/* Init tss */
	pc->pc_common_tss = __pcpu[0].pc_common_tss;
	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	pc->pc_common_tss.tss_rsp0 = 0;

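	/*
	 * Each of the dedicated exception stacks below keeps a struct
	 * nmi_pcpu at its top whose np_pcpu member points back at this
	 * CPU's pcpu.  Handlers that run on these IST stacks (NMI, MC#,
	 * DB#, double fault) use it to find their per-CPU data without
	 * relying on the GS base being valid.
	 */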
	/* The doublefault stack runs on IST1. */
	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist1 = (long)np;

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist2 = (long)np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist3 = (long)np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist4 = (long)np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (u_long)gdt;
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (uint64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}

/*******************************************************************
 * local functions and data
 */

#ifdef NUMA
static void
mp_realloc_pcpu(int cpuid, int domain)
{
	vm_page_t m;
	vm_offset_t oa, na;

	oa = (vm_offset_t)&__pcpu[cpuid];
	if (vm_phys_domain(pmap_kextract(oa)) == domain)
		return;
	m = vm_page_alloc_noobj_domain(domain, 0);
	if (m == NULL)
		return;
	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	pagecopy((void *)oa, (void *)na);
	pmap_qenter((vm_offset_t)&__pcpu[cpuid], &m, 1);
	/* XXX old pcpu page leaked. */
}
#endif
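
/*
 * To hand control to the real-mode trampoline, start_all_aps() builds
 * a transient 1:1 mapping of the low 4 GB in slot 0 of the kernel page
 * table: four page-directory pages of 2 MB superpages under a single
 * PDP page (plus, with la57, a dedicated PML4 page hung off the PML5).
 * The mapping is torn down after the last AP has been started.
 */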

/*
 * start each AP in our list
 */
int
start_all_aps(void)
{
	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
	pml5_entry_t old_pml45;
	pml4_entry_t *v_pml4;
	pdp_entry_t *v_pdp;
	pd_entry_t *v_pd;
	vm_paddr_t boot_address;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, domain, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	MPASS(bootMP_size <= PAGE_SIZE);
	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	boot_address = VM_PAGE_TO_PHYS(m_boottramp);

	/* Create a transient 1:1 mapping of low 4G */
	if (la57) {
		m_pml4 = pmap_page_alloc_below_4g(true);
		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
	} else {
		v_pml4 = &kernel_pmap->pm_pmltop[0];
	}
	m_pdp = pmap_page_alloc_below_4g(true);
	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
	m_pd[0] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
		    X86_PG_M | PG_PS;
	m_pd[1] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
		    X86_PG_A | X86_PG_M | PG_PS;
	m_pd[2] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	m_pd[3] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	old_pml45 = kernel_pmap->pm_pmltop[0];
	if (la57) {
		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
	}
	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	pmap_invalidate_all(kernel_pmap);

	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
	if (bootverbose)
		printf("AP boot address %#lx\n", boot_address);

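	/*
	 * The warm-start vector at 40:67 and the BIOS_WARM shutdown
	 * code in CMOS are a legacy startup path: a CPU that comes out
	 * of reset while the shutdown status byte reads 0x0a resumes
	 * via the vector instead of running POST.  APs are normally
	 * woken with INIT/STARTUP IPIs below; the vector is programmed
	 * anyway for BIOSes that still bounce the AP through it.
	 */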
	/* save the current value of the warm-start vector */
	if (!efi_boot)
		mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	if (!efi_boot) {
		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
	}
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* Relocate pcpu areas to the correct domain. */
#ifdef NUMA
	if (vm_ndomains > 1)
		for (cpu = 1; cpu < mp_ncpus; cpu++) {
			apic_id = cpu_apic_ids[cpu];
			domain = acpi_pxm_get_cpu_locality(apic_id);
			mp_realloc_pcpu(cpu, domain);
		}
#endif

	/* start each AP */
	domain = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(DBLFAULT_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		mce_stack = (char *)kmem_malloc(MCE_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
		    DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootpcpu = &__pcpu[cpu];
		bootSTK = (char *)bootstacks[cpu] +
		    kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id, boot_address)) {
			/* restore the warmstart vector */
			if (!efi_boot)
				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	if (!efi_boot)
		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* Destroy transient 1:1 mapping */
	kernel_pmap->pm_pmltop[0] = old_pml45;
	invlpg(0);
	if (la57)
		vm_page_free(m_pml4);
	vm_page_free(m_pd[3]);
	vm_page_free(m_pd[2]);
	vm_page_free(m_pd[1]);
	vm_page_free(m_pd[0]);
	vm_page_free(m_pdp);
	vm_page_free(m_boottramp);

	/* number of APs actually started */
	return (mp_naps);
}

/*
 * This function starts the AP (application processor) identified by
 * the APIC ID 'apic_id'.  It does quite a "song and dance" to
 * accomplish this.  This is necessary because of the nuances of the
 * different hardware we might encounter.  It isn't pretty, but it
 * seems to work.
 */
static int
start_ap(int apic_id, vm_paddr_t boot_address)
{
	int vector, ms;
	int cpus;

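	/*
	 * A STARTUP IPI encodes the real-mode entry point in its 8-bit
	 * vector field: the AP begins execution at physical address
	 * vector << 12 (CS = vector << 8, IP = 0), which is why the
	 * trampoline must sit page-aligned below 1 MB.
	 */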
	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return (1);	/* return SUCCESS */
		DELAY(1000);
	}
	return (0);		/* return FAILURE */
}

/*
 * Flush the TLB on other CPUs
 */

/*
 * Invalidation request.  PCPU pc_smp_tlb_op uses u_int instead of the
 * enum to avoid both namespace and ABI issues (with enums).
 */
enum invl_op_codes {
	INVL_OP_TLB		= 1,
	INVL_OP_TLB_INVPCID	= 2,
	INVL_OP_TLB_INVPCID_PTI	= 3,
	INVL_OP_TLB_PCID	= 4,
	INVL_OP_PGRNG		= 5,
	INVL_OP_PGRNG_INVPCID	= 6,
	INVL_OP_PGRNG_PCID	= 7,
	INVL_OP_PG		= 8,
	INVL_OP_PG_INVPCID	= 9,
	INVL_OP_PG_PCID		= 10,
	INVL_OP_CACHE		= 11,
};

/*
 * These variables are initialized at startup to reflect how each of
 * the different kinds of invalidations should be performed on the
 * current machine and environment.
 */
static enum invl_op_codes invl_op_tlb;
static enum invl_op_codes invl_op_pgrng;
static enum invl_op_codes invl_op_pg;

/*
 * Scoreboard of IPI completion notifications from target to IPI initiator.
 *
 * Each CPU can initiate a shootdown IPI independently of other CPUs.
 * The initiator enters a critical section, then fills its local PCPU
 * shootdown info (pc_smp_tlb_ vars), then clears the scoreboard
 * generation at location (cpu, my_cpuid) for each target cpu.  After
 * that the IPI is sent to all targets, which scan for zeroed
 * scoreboard generation words.  Upon finding such a word the shootdown
 * data is read from the corresponding cpu's pcpu, and the generation
 * is set.  Meanwhile, the initiator loops waiting for all zeroed
 * generations in the scoreboard to update.
 */
static uint32_t *invl_scoreboard;

static void
invl_scoreboard_init(void *arg __unused)
{
	u_int i;

	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
		invl_scoreboard[i] = 1;

	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			if (pti)
				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
			else
				invl_op_tlb = INVL_OP_TLB_INVPCID;
			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
			invl_op_pg = INVL_OP_PG_INVPCID;
		} else {
			invl_op_tlb = INVL_OP_TLB_PCID;
			invl_op_pgrng = INVL_OP_PGRNG_PCID;
			invl_op_pg = INVL_OP_PG_PCID;
		}
	} else {
		invl_op_tlb = INVL_OP_TLB;
		invl_op_pgrng = INVL_OP_PGRNG;
		invl_op_pg = INVL_OP_PG;
	}
}
SYSINIT(invl_ops, SI_SUB_SMP, SI_ORDER_FIRST, invl_scoreboard_init, NULL);

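/*
 * The scoreboard is a dense (mp_maxid + 1) x (mp_maxid + 1) matrix of
 * generation words: the row selects the target CPU, the column the
 * initiator, so slot (target, initiator) lives at
 * invl_scoreboard[target * (mp_maxid + 1) + initiator].
 * invl_scoreboard_slot(cpu) thus names the word the current CPU,
 * acting as initiator, shares with target 'cpu'.
 */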
static uint32_t *
invl_scoreboard_getcpu(u_int cpu)
{
	return (invl_scoreboard + cpu * (mp_maxid + 1));
}

static uint32_t *
invl_scoreboard_slot(u_int cpu)
{
	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
}

/*
 * Used by the pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs that are
 * to be signalled with the invalidation IPI.  As an optimization, the
 * curcpu_cb callback is invoked on the calling CPU in a critical
 * section while waiting for the remote CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 *
 * Interrupts must be enabled when calling the function with smp
 * started, to avoid deadlock with other IPIs that are protected with
 * the smp_ipi_mtx spinlock at the initiator side.
 *
 * The function must be called with the thread pinned; it unpins the
 * thread on completion.
 */
static void
smp_targeted_tlb_shootdown(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
    smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
	cpuset_t mask;
	uint32_t generation, *p_cpudone;
	int cpu;
	bool is_all;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
		goto local_cb;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Make a stable copy of the set of CPUs on which the pmap is active.
	 * See if we have to interrupt other CPUs.
	 */
	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
	is_all = CPU_CMP(&mask, &all_cpus) == 0;
	CPU_CLR(curcpu, &mask);
	if (CPU_EMPTY(&mask))
		goto local_cb;

	/*
	 * The initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take the smp_ipi_mtx spinlock
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin the initiator to the instance of the
	 * pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	critical_enter();

	PCPU_SET(smp_tlb_addr1, addr1);
	PCPU_SET(smp_tlb_addr2, addr2);
	PCPU_SET(smp_tlb_pmap, pmap);
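	/*
	 * Generation numbers count this initiator's requests.  Zero is
	 * reserved as the "request pending" marker in the scoreboard,
	 * so the counter skips it on wraparound.
	 */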
	generation = PCPU_GET(smp_tlb_gen);
	if (++generation == 0)
		generation = 1;
	PCPU_SET(smp_tlb_gen, generation);
	PCPU_SET(smp_tlb_op, op);
	/* Fence between filling smp_tlb fields and clearing scoreboard. */
	atomic_thread_fence_rel();

	CPU_FOREACH_ISSET(cpu, &mask) {
		KASSERT(*invl_scoreboard_slot(cpu) != 0,
		    ("IPI scoreboard is zero, initiator %d target %d",
		    curcpu, cpu));
		*invl_scoreboard_slot(cpu) = 0;
	}

	/*
	 * IPI acts as a fence between writing to the scoreboard above
	 * (zeroing slot) and reading from it below (wait for
	 * acknowledgment).
	 */
	if (is_all) {
		ipi_all_but_self(IPI_INVLOP);
	} else {
		ipi_selected(mask, IPI_INVLOP);
	}
	curcpu_cb(pmap, addr1, addr2);
	CPU_FOREACH_ISSET(cpu, &mask) {
		p_cpudone = invl_scoreboard_slot(cpu);
		while (atomic_load_int(p_cpudone) != generation)
			ia32_pause();
	}

	/*
	 * Unpin before leaving critical section.  If the thread owes
	 * preemption, this allows scheduler to select thread on any
	 * CPU from its cpuset.
	 */
	sched_unpin();
	critical_exit();

	return;

local_cb:
	critical_enter();
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
	critical_exit();
}

void
smp_masked_invltlb(pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, 0, 0, curcpu_cb, invl_op_tlb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(vm_offset_t addr, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, addr, 0, curcpu_cb, invl_op_pg);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(vm_offset_t addr1, vm_offset_t addr2, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, addr1, addr2, curcpu_cb,
	    invl_op_pgrng);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(kernel_pmap, 0, 0, curcpu_cb, INVL_OP_CACHE);
}

/*
 * Handlers for TLB related IPIs
 */
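/*
 * Each handler below runs in IPI context on a target CPU.  Its
 * arguments are the pc_smp_tlb_* values that invlop_handler()
 * snapshotted from the initiator's PCPU before acknowledging the
 * request.
 */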
static void
invltlb_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	else
		invltlb();
}

static void
invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
}

static void
invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing a global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		if (smp_tlb_pmap == PCPU_GET(curpmap) &&
		    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
	}
}

static void
invltlb_pcid_handler(pmap_t smp_tlb_pmap)
{
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
			load_cr3(smp_tlb_pmap->pm_cr3 | pcid);
			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
		}
	}
}

static void
invlpg_handler(vm_offset_t smp_tlb_addr1)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
}

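/*
 * For the PCID/PTI variants below, invlpg() flushes the address from
 * the kernel page-table TLB entries only.  The user page-table entries
 * are flushed explicitly when the target pmap is the current one and
 * its user CR3 is valid; otherwise the already-pending full reload of
 * ucr3 (ucr3_load_mask != PMAP_UCR3_NOMASK) flushes the user PCID
 * anyway.
 */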
static void
invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
}

static void
invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
}

static void
invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
}

static void
invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	struct invpcid_descr d;
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < addr2);
	}
}

static void
invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
	}
}

static void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */
	wbinvd();
}

static void
invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
    vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	switch (smp_tlb_op) {
	case INVL_OP_TLB:
		invltlb_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID:
		invltlb_invpcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID_PTI:
		invltlb_invpcid_pti_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_PCID:
		invltlb_pcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_PGRNG:
		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_INVPCID:
		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_PCID:
		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PG:
		invlpg_handler(smp_tlb_addr1);
		break;
	case INVL_OP_PG_INVPCID:
		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_PG_PCID:
		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_CACHE:
		invlcache_handler();
		break;
	default:
		__assert_unreachable();
		break;
	}
}

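/*
 * Top-level IPI_INVLOP handler: scan this CPU's scoreboard row for
 * zeroed slots, since several initiators may have requests in flight
 * concurrently, service each pending request, and rescan until no
 * zeroed slots remain.
 */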
void
invlop_handler(void)
{
	struct pcpu *initiator_pc;
	pmap_t smp_tlb_pmap;
	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
	u_int initiator_cpu_id;
	enum invl_op_codes smp_tlb_op;
	uint32_t *scoreboard, smp_tlb_gen;

	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
	for (;;) {
		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
		    initiator_cpu_id++) {
			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
				break;
		}
		if (initiator_cpu_id > mp_maxid)
			break;
		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];

		/*
		 * This acquire fence and its corresponding release
		 * fence in smp_targeted_tlb_shootdown() is between
		 * reading zero scoreboard slot and accessing PCPU of
		 * initiator for pc_smp_tlb values.
		 */
		atomic_thread_fence_acq();
		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;

		/*
		 * Ensure that we do not make our scoreboard
		 * notification visible to the initiator until the
		 * pc_smp_tlb values are read.  The corresponding
		 * fence is implicitly provided by the barrier in the
		 * IPI send operation before the APIC ICR register
		 * write.
		 *
		 * As an optimization, the request is acknowledged
		 * before the actual invalidation is performed.  It is
		 * safe because the target CPU cannot return to
		 * userspace before the handler finishes.  Only an NMI
		 * can preempt the handler, but an NMI would see the
		 * kernel handler frame and would not touch the
		 * not-yet-invalidated user page tables.
		 */
		atomic_thread_fence_acq();
		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);

		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
	}
}