1 /*- 2 * Copyright (c) 2014 Andrew Turner 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 *
 */

#include "opt_acpi.h"
#include "opt_platform.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/csan.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/physmem.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/reg.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vdso.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <machine/acpica_machdep.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

/* Bus enumeration method chosen at boot; set by bus_probe(). */
enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;

/*
 * XXX: The .bss is assumed to be in the boot CPU NUMA domain.
 * If not we could relocate this, but will need to keep the same virtual
 * address as it's referenced by the EARLY_COUNTER macro.
 */
struct pcpu pcpu0;

#if defined(PERTHREAD_SSP)
/*
 * The boot SSP canary. Will be replaced with a per-thread canary when
 * scheduling has started.
 */
uintptr_t boot_canary = 0x49a2d892bc05a0b1ul;
#endif

/* Trapframe used for thread0; installed in init_proc0(). */
static struct trapframe proc0_tf;

int early_boot = 1;
int cold = 1;
/* Exception level the kernel booted at; consumed by has_hyp(). */
static int boot_el;

struct kva_md_info kmi;

int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
int has_pan;

/*
 * Physical address of the EFI System Table. Stashed from the metadata hints
 * passed into the kernel and used by the EFI code to call runtime services.
 */
vm_paddr_t efi_systbl_phys;
static struct efi_map_header *efihdr;

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;

int (*apei_nmi)(void);

#if defined(PERTHREAD_SSP_WARNING)
/* Warn (early and late, so it is visible) that per-thread SSP is degraded. */
static void
print_ssp_warning(void *data __unused)
{
	printf("WARNING: Per-thread SSP is enabled but the compiler is too old to support it\n");
}
SYSINIT(ssp_warn, SI_SUB_COPYRIGHT, SI_ORDER_ANY, print_ssp_warning, NULL);
SYSINIT(ssp_warn2, SI_SUB_LAST, SI_ORDER_ANY, print_ssp_warning, NULL);
#endif

/*
 * Probe ID_AA64MMFR1_EL1 for Privileged Access Never support and record
 * the result in has_pan for pan_enable() to consume.
 */
static void
pan_setup(void)
{
	uint64_t id_aa64mfr1;

	id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
	if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
		has_pan = 1;
}

void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}

/* True when the kernel was entered at EL2. */
bool
has_hyp(void)
{

	return (boot_el == 2);
}

/*
 * Report the physical memory layout and finish VM startup (submaps and
 * buffer cache); registered below to run at SI_SUB_CPU.
 */
static void
cpu_startup(void *dummy)
{
	vm_paddr_t size;
	int i;

	printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / 1024 / 1024);

	if (bootverbose) {
		printf("Physical memory chunk(s):\n");
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			size = phys_avail[i + 1] - phys_avail[i];
			printf("%#016jx - %#016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[i],
			    (uintmax_t)phys_avail[i + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1024 / 1024);

	undef_init();
	install_cpu_errata();

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Apply ifunc relocations deferred until the CPU is fully identified. */
static void
late_ifunc_resolve(void *dummy __unused)
{
	link_elf_late_ireloc();
}
SYSINIT(late_ifunc_resolve, SI_SUB_CPU, SI_ORDER_ANY, late_ifunc_resolve, NULL);

/* No machine-dependent action is taken to wake an idle CPU; returns 0. */
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

/*
 * Idle the CPU: wait for an interrupt (dsb; wfi) when nothing is
 * runnable, stopping the idle clock around the wait when !busy.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}

void
cpu_halt(void)
{

	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	while (1) {
		__asm __volatile("wfi");
	}
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	struct pcpu *pc;

	pc = pcpu_find(cpu_id);
	if (pc == NULL || rate == NULL)
		return (EINVAL);

	/* A zero pc_clock means the frequency is not known. */
	if (pc->pc_clock == 0)
		return (EOPNOTSUPP);

	*rate = pc->pc_clock;
	return (0);
}

/*
 * Initialize the machine-dependent pcpu fields.  The ACPI id and MPIDR
 * halves are set to all-ones sentinels here; presumably they are filled
 * in later during CPU enumeration — verify against the callers.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
	pcpu->pc_mpidr_low = 0xffffffff;
	pcpu->pc_mpidr_high = 0xffffffff;
}

/*
 * Enter a spinlock section.  The outermost entry disables interrupts,
 * saving the previous DAIF state for spinlock_exit() to restore; nested
 * entries only bump the per-thread count.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}

/*
 * Leave a spinlock section.  When the outermost section ends, restore
 * the DAIF state saved by spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(daif);
	}
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	int i;

	for (i = 0; i < nitems(pcb->pcb_x); i++)
		pcb->pcb_x[i] = tf->tf_x[i];

	/* NB: pcb_lr is the PC, see PC_REGS() in db_machdep.h */
	pcb->pcb_lr = tf->tf_elr;
	pcb->pcb_sp = tf->tf_sp;
}

/*
 * Finish setting up thread0/proc0: link them together, place the PCB at
 * the top of the kernel stack, and hook thread0 into CPU 0's pcpu data.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup;

	pcpup = cpuid_to_pcpu[0];
	MPASS(pcpup != NULL);

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
#if defined(PERTHREAD_SSP)
	thread0.td_md.md_canary = boot_canary;
#endif
	/* The PCB sits just below the top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	ptrauth_thread0(&thread0);
	pcpup->pc_curpcb = thread0.td_pcb;

	/*
	 * Unmask SError exceptions. They are used to signal a RAS failure,
	 * or other hardware error.
	 */
	serror_enable();
}

/*
 * Get an address to be used to write to kernel data that may be mapped
 * read-only, e.g. to patch kernel code.
 */
bool
arm64_get_writable_addr(vm_offset_t addr, vm_offset_t *out)
{
	vm_paddr_t pa;

	/* Check if the page is writable */
	if (PAR_SUCCESS(arm64_address_translate_s1e1w(addr))) {
		*out = addr;
		return (true);
	}

	/*
	 * Find the physical address of the given page.
	 */
	if (!pmap_klookup(addr, &pa)) {
		return (false);
	}

	/*
	 * If it is within the DMAP region and is writable use that.
	 */
	if (PHYS_IN_DMAP(pa)) {
		addr = PHYS_TO_DMAP(pa);
		if (PAR_SUCCESS(arm64_address_translate_s1e1w(addr))) {
			*out = addr;
			return (true);
		}
	}

	return (false);
}

/* Callback type invoked on each descriptor of the EFI memory map. */
typedef void (*efi_map_entry_cb)(struct efi_md *);

/* Walk the EFI memory map, invoking cb on every descriptor. */
static void
foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
{
	struct efi_md *map, *p;
	size_t efisz;
	int ndesc, i;

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	/* A zero descriptor size would divide by zero below. */
	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		cb(p);
	}
}

/* Exclude a map entry from the allocator unless its type is usable. */
static void
exclude_efi_map_entry(struct efi_md *p)
{

	switch (p->md_type) {
	case EFI_MD_TYPE_CODE:
	case EFI_MD_TYPE_DATA:
	case EFI_MD_TYPE_BS_CODE:
	case EFI_MD_TYPE_BS_DATA:
	case EFI_MD_TYPE_FREE:
		/*
		 * We're allowed to use any entry with these types.
		 */
		break;
	default:
		physmem_exclude_region(p->md_phys, p->md_pages * EFI_PAGE_SIZE,
		    EXFLAG_NOALLOC);
	}
}

static void
exclude_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
}

/* Register a map entry as a hardware memory region when its type allows. */
static void
add_efi_map_entry(struct efi_md *p)
{

	switch (p->md_type) {
	case EFI_MD_TYPE_RECLAIM:
		/*
		 * The recommended location for ACPI tables. Map into the
		 * DMAP so we can access them from userspace via /dev/mem.
		 */
	case EFI_MD_TYPE_RT_CODE:
		/*
		 * Some UEFI implementations put the system table in the
		 * runtime code section. Include it in the DMAP, but will
		 * be excluded from phys_avail later.
		 */
	case EFI_MD_TYPE_RT_DATA:
		/*
		 * Runtime data will be excluded after the DMAP
		 * region is created to stop it from being added
		 * to phys_avail.
		 */
	case EFI_MD_TYPE_CODE:
	case EFI_MD_TYPE_DATA:
	case EFI_MD_TYPE_BS_CODE:
	case EFI_MD_TYPE_BS_DATA:
	case EFI_MD_TYPE_FREE:
		/*
		 * We're allowed to use any entry with these types.
		 */
		physmem_hardware_region(p->md_phys,
		    p->md_pages * EFI_PAGE_SIZE);
		break;
	}
}

static void
add_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, add_efi_map_entry);
}

/* Print one EFI memory map descriptor: type, range, size, attributes. */
static void
print_efi_map_entry(struct efi_md *p)
{
	const char *type;
	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode",
		"PersistentMemory"
	};

	if (p->md_type < nitems(types))
		type = types[p->md_type];
	else
		type = "<INVALID>";
	printf("%23s %012lx %012lx %08lx ", type, p->md_phys,
	    p->md_virt, p->md_pages);
	if (p->md_attr & EFI_MD_ATTR_UC)
		printf("UC ");
	if (p->md_attr & EFI_MD_ATTR_WC)
		printf("WC ");
	if (p->md_attr & EFI_MD_ATTR_WT)
		printf("WT ");
	if (p->md_attr & EFI_MD_ATTR_WB)
		printf("WB ");
	if (p->md_attr & EFI_MD_ATTR_UCE)
		printf("UCE ");
	if (p->md_attr & EFI_MD_ATTR_WP)
		printf("WP ");
	if (p->md_attr & EFI_MD_ATTR_RP)
		printf("RP ");
	if (p->md_attr & EFI_MD_ATTR_XP)
		printf("XP ");
	if (p->md_attr & EFI_MD_ATTR_NV)
		printf("NV ");
	if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
		printf("MORE_RELIABLE ");
	if (p->md_attr & EFI_MD_ATTR_RO)
		printf("RO ");
	if (p->md_attr & EFI_MD_ATTR_RT)
		printf("RUNTIME");
printf("\n"); 573 } 574 575 static void 576 print_efi_map_entries(struct efi_map_header *efihdr) 577 { 578 579 printf("%23s %12s %12s %8s %4s\n", 580 "Type", "Physical", "Virtual", "#Pages", "Attr"); 581 foreach_efi_map_entry(efihdr, print_efi_map_entry); 582 } 583 584 #ifdef FDT 585 static void 586 try_load_dtb(caddr_t kmdp) 587 { 588 vm_offset_t dtbp; 589 590 dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); 591 #if defined(FDT_DTB_STATIC) 592 /* 593 * In case the device tree blob was not retrieved (from metadata) try 594 * to use the statically embedded one. 595 */ 596 if (dtbp == 0) 597 dtbp = (vm_offset_t)&fdt_static_dtb; 598 #endif 599 600 if (dtbp == (vm_offset_t)NULL) { 601 #ifndef TSLOG 602 printf("ERROR loading DTB\n"); 603 #endif 604 return; 605 } 606 607 if (OF_install(OFW_FDT, 0) == FALSE) 608 panic("Cannot install FDT"); 609 610 if (OF_init((void *)dtbp) != 0) 611 panic("OF_init failed with the found device tree"); 612 613 parse_fdt_bootargs(); 614 } 615 #endif 616 617 static bool 618 bus_probe(void) 619 { 620 bool has_acpi, has_fdt; 621 char *order, *env; 622 623 has_acpi = has_fdt = false; 624 625 #ifdef FDT 626 has_fdt = (OF_peer(0) != 0); 627 #endif 628 #ifdef DEV_ACPI 629 has_acpi = (AcpiOsGetRootPointer() != 0); 630 #endif 631 632 env = kern_getenv("kern.cfg.order"); 633 if (env != NULL) { 634 order = env; 635 while (order != NULL) { 636 if (has_acpi && 637 strncmp(order, "acpi", 4) == 0 && 638 (order[4] == ',' || order[4] == '\0')) { 639 arm64_bus_method = ARM64_BUS_ACPI; 640 break; 641 } 642 if (has_fdt && 643 strncmp(order, "fdt", 3) == 0 && 644 (order[3] == ',' || order[3] == '\0')) { 645 arm64_bus_method = ARM64_BUS_FDT; 646 break; 647 } 648 order = strchr(order, ','); 649 } 650 freeenv(env); 651 652 /* If we set the bus method it is valid */ 653 if (arm64_bus_method != ARM64_BUS_NONE) 654 return (true); 655 } 656 /* If no order or an invalid order was set use the default */ 657 if (arm64_bus_method == ARM64_BUS_NONE) { 658 if (has_fdt) 
)
			arm64_bus_method = ARM64_BUS_FDT;
		else if (has_acpi)
			arm64_bus_method = ARM64_BUS_ACPI;
	}

	/*
	 * If no option was set the default is valid, otherwise we are
	 * setting one to get cninit() working, then calling panic to tell
	 * the user about the invalid bus setup.
	 */
	return (env == NULL);
}

/*
 * Identify the cache geometry and, when "dc zva" is permitted by
 * DCZID_EL0, record its zeroing line size and switch pagezero to the
 * cache-assisted implementation.
 */
static void
cache_setup(void)
{
	int dczva_line_shift;
	uint32_t dczid_el0;

	identify_cache(READ_SPECIALREG(ctr_el0));

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/* Same as with above calculations */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}

/*
 * Choose the memory attribute for mapping physical address pa, based on
 * the EFI memory map when one was provided at boot; without a map (or
 * when the descriptor size is bogus) fall back to write-back.
 */
int
memory_mapping_mode(vm_paddr_t pa)
{
	struct efi_md *map, *p;
	size_t efisz;
	int ndesc, i;

	if (efihdr == NULL)
		return (VM_MEMATTR_WRITE_BACK);

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return (VM_MEMATTR_WRITE_BACK);
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		/* Skip descriptors that do not cover pa. */
		if (pa < p->md_phys ||
		    pa >= p->md_phys + p->md_pages * EFI_PAGE_SIZE)
			continue;
		if (p->md_type == EFI_MD_TYPE_IOMEM ||
		    p->md_type == EFI_MD_TYPE_IOPORT)
			return (VM_MEMATTR_DEVICE);
		else if ((p->md_attr & EFI_MD_ATTR_WB) != 0 ||
		    p->md_type == EFI_MD_TYPE_RECLAIM)
			return (VM_MEMATTR_WRITE_BACK);
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			return (VM_MEMATTR_WRITE_THROUGH);
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			return (VM_MEMATTR_WRITE_COMBINING);
		break;
	}

	/* Addresses outside every descriptor are treated as device memory. */
	return (VM_MEMATTR_DEVICE);
}

/*
 * Machine-dependent early boot initialization: parse the boot metadata,
 * register physical memory, bootstrap pmap and the console, set up
 * thread0 and the debug/KDB machinery.  The ordering of the calls below
 * is significant — do not reorder without care.
 */
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_fb *efifb;
	struct pcpu *pcpup;
	char *env;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
	phandle_t root;
	char dts_version[255];
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	bool valid;

	TSRAW(&thread0, TS_ENTER, __func__, NULL);

	boot_el = abp->boot_el;

	/* Parse loader or FDT boot parameters. Determine last used address. */
	lastaddr = parse_boot_param(abp);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	identify_cpu(0);
	update_special_regs(0);

	link_elf_ireloc(kmdp);
	try_load_dtb(kmdp);

	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

	/* Load the physical memory ranges */
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		physmem_hardware_regions(mem_regions, mem_regions_sz);
	}
	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
#endif

	/* Exclude the EFI framebuffer from our view of physical memory. */
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
	if (efifb != NULL)
		physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
		    EXFLAG_NOALLOC);

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &pcpu0;
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/* locore.S sets sp_el0 to &thread0 so no need to set it here. */
	PCPU_SET(curthread, &thread0);
	PCPU_SET(midr, get_midr());

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
	/* Exclude entries needed in the DMAP region, but not phys_avail */
	if (efihdr != NULL)
		exclude_efi_map_entries(efihdr);
	physmem_init_kernel_globals();

	devmap_bootstrap(0, NULL);

	valid = bus_probe();

	cninit();
	set_ttbr0(abp->kern_ttbr0);
	cpu_tlb_flushID();

	/* Deferred until after cninit() so the message is visible. */
	if (!valid)
		panic("Invalid bus configuration: %s",
		    kern_getenv("kern.cfg.order"));

	/*
	 * Check if pointer authentication is available on this system, and
	 * if so enable its use. This needs to be called before init_proc0
	 * as that will configure the thread0 pointer authentication keys.
	 */
	ptrauth_init();

	/*
	 * Dump the boot metadata. We have to wait for cninit() since console
	 * output is required. If it's grossly incorrect the kernel will never
	 * make it this far.
	 */
	if (getenv_is_true("debug.dump_modinfo_at_boot"))
		preload_dump();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_init();
	kdb_init();
#ifdef KDB
	if ((boothowto & RB_KDB) != 0)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
	pan_enable();

	kcsan_cpu_init(0);

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

#ifdef FDT
	if (arm64_bus_method == ARM64_BUS_FDT) {
		root = OF_finddevice("/");
		if (OF_getprop(root, "freebsd,dts-version", dts_version, sizeof(dts_version)) > 0) {
			if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
				printf("WARNING: DTB version is %s while kernel expects %s, "
				    "please update the DTB in the ESP\n",
				    dts_version,
				    LINUX_DTS_VERSION);
		} else {
			printf("WARNING: Cannot find freebsd,dts-version property, "
			    "cannot check DTB compliance\n");
		}
	}
#endif

	if (boothowto & RB_VERBOSE) {
		if (efihdr != NULL)
			print_efi_map_entries(efihdr);
		physmem_print_tables();
	}

	early_boot = 0;

	TSEXIT();
}

/*
 * Initialize the debug hardware: clear the OS lock so the debug
 * registers become writable, then set up the kernel debug monitor.
 */
void
dbg_init(void)
{

	/* Clear OS lock */
	WRITE_SPECIALREG(oslar_el1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here.
 */
}

#ifdef DDB
#include <ddb/ddb.h>

/* ddb "show specialregs": dump the AArch64 special registers. */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define PRINT_REG(reg) \
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}

/*
 * ddb "show vtop <virt_addr>": translate the address with the AT
 * instructions for EL1/EL0, read and write, printing each PAR value.
 */
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	uint64_t phys;

	if (have_addr) {
		phys = arm64_address_translate_s1e1r(addr);
		db_printf("EL1 physical address reg (read): 0x%016lx\n", phys);
		phys = arm64_address_translate_s1e1w(addr);
		db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
		phys = arm64_address_translate_s1e0r(addr);
		db_printf("EL0 physical address reg (read): 0x%016lx\n", phys);
		phys = arm64_address_translate_s1e0w(addr);
		db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
	} else
		db_printf("show vtop <virt_addr>\n");
}
#endif