/*	$OpenBSD: machdep.c,v 1.55 2020/07/21 21:36:58 kettenis Exp $	*/

/*
 * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/signalvar.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/user.h>

#include <machine/cpufunc.h>
#include <machine/fpu.h>
#include <machine/opal.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <net/if.h>
#include <uvm/uvm_extern.h>

#include <dev/ofw/fdt.h>
#include <dev/cons.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#include <ddb/db_interface.h>
#endif

int cacheline_size = 128;

struct uvm_constraint_range dma_constraint = { 0x0, (paddr_t)-1 };
struct uvm_constraint_range *uvm_md_constraints[] = { NULL };

int cold = 1;
int safepri = 0;
int physmem;
paddr_t physmax;

struct vm_map *exec_map;
struct vm_map *phys_map;

char machine[] = MACHINE;

struct user *proc0paddr;

caddr_t ssym, esym;

extern char _start[], _end[];
extern char __bss_start[];

extern uint64_t opal_base;
extern uint64_t opal_entry;

extern char trapcode[], trapcodeend[];
extern char hvtrapcode[], hvtrapcodeend[];
extern char generictrap[];
extern char generichvtrap[];

extern char initstack[];

struct fdt_reg memreg[VM_PHYSSEG_MAX];
int nmemreg;

#ifdef DDB
struct fdt_reg initrd_reg;
#endif

void memreg_add(const struct fdt_reg *);
void memreg_remove(const struct fdt_reg *);

void parse_bootargs(const char *);
const char *parse_bootduid(const char *);

paddr_t fdt_pa;
size_t fdt_size;

void
init_powernv(void *fdt, void *tocbase)
{
	struct fdt_reg reg;
	register_t uspace;
	paddr_t trap;
	uint64_t msr;
	void *node;
	char *prop;
	int len;
	int i;

	/* Store pointer to our struct cpu_info. */
	__asm volatile ("mtsprg0 %0" :: "r"(&cpu_info_primary));
	__asm volatile ("mr %%r13, %0" :: "r"(&cpu_info_primary));

	/* Clear BSS. */
	memset(__bss_start, 0, _end - __bss_start);

	if (!fdt_init(fdt) || fdt_get_size(fdt) == 0)
		panic("no FDT");

	/* Get OPAL base and entry addresses from FDT. */
	node = fdt_find_node("/ibm,opal");
	if (node) {
		fdt_node_property(node, "opal-base-address", &prop);
		opal_base = bemtoh64((uint64_t *)prop);
		fdt_node_property(node, "opal-entry-address", &prop);
		opal_entry = bemtoh64((uint64_t *)prop);
		fdt_node_property(node, "compatible", &prop);

		opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);

		/*
		 * The following call will fail on Power ISA 2.0x CPUs,
		 * but that is fine since they don't support Radix Tree
		 * translation.  On Power ISA 3.0 CPUs this will make
		 * the full TLB available.
		 */
		opal_reinit_cpus(OPAL_REINIT_CPUS_MMU_HASH);
	}

	/* At this point we can call OPAL runtime services and use printf(9). */
	printf("Hello, World!\n");

	/* Stash these such that we can remap the FDT later. */
	fdt_pa = (paddr_t)fdt;
	fdt_size = fdt_get_size(fdt);

	/*
	 * Initialize all traps with the stub that calls the generic
	 * trap handler.
	 */
	for (trap = EXC_RST; trap < EXC_LAST; trap += 32)
		memcpy((void *)trap, trapcode, trapcodeend - trapcode);

	/* Hypervisor interrupts need special handling. */
	memcpy((void *)EXC_HDSI, hvtrapcode, hvtrapcodeend - hvtrapcode);
	memcpy((void *)EXC_HISI, hvtrapcode, hvtrapcodeend - hvtrapcode);
	memcpy((void *)EXC_HEA, hvtrapcode, hvtrapcodeend - hvtrapcode);
	memcpy((void *)EXC_HMI, hvtrapcode, hvtrapcodeend - hvtrapcode);
	memcpy((void *)EXC_HFAC, hvtrapcode, hvtrapcodeend - hvtrapcode);
	memcpy((void *)EXC_HVI, hvtrapcode, hvtrapcodeend - hvtrapcode);

	*((void **)TRAP_ENTRY) = generictrap;
	*((void **)TRAP_HVENTRY) = generichvtrap;

	/* Make the stubs visible to the CPU. */
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/* We're now ready to take traps. */
	msr = mfmsr();
	mtmsr(msr | (PSL_ME|PSL_RI));

#define LPCR_LPES	0x0000000000000008UL
#define LPCR_HVICE	0x0000000000000002UL

	mtlpcr(LPCR_LPES | LPCR_HVICE);
	isync();

	/* Add all memory. */
	node = fdt_find_node("/");
	for (node = fdt_child_node(node); node; node = fdt_next_node(node)) {
		len = fdt_node_property(node, "device_type", &prop);
		if (len <= 0)
			continue;
		if (strcmp(prop, "memory") != 0)
			continue;
		for (i = 0; nmemreg < nitems(memreg); i++) {
			if (fdt_get_reg(node, i, &reg))
				break;
			if (reg.size == 0)
				continue;
			memreg_add(&reg);
			physmem += atop(reg.size);
			physmax = MAX(physmax, reg.addr + reg.size);
		}
	}

	/* Remove reserved memory. */
	node = fdt_find_node("/reserved-memory");
	if (node) {
		for (node = fdt_child_node(node); node;
		    node = fdt_next_node(node)) {
			if (fdt_get_reg(node, 0, &reg))
				continue;
			if (reg.size == 0)
				continue;
			memreg_remove(&reg);
		}
	}

	/* Remove interrupt vectors. */
	reg.addr = trunc_page(EXC_RSVD);
	reg.size = round_page(EXC_LAST);
	memreg_remove(&reg);

	/* Remove kernel. */
	reg.addr = trunc_page((paddr_t)_start);
	reg.size = round_page((paddr_t)_end) - reg.addr;
	memreg_remove(&reg);

	/* Remove FDT. */
	reg.addr = trunc_page((paddr_t)fdt);
	reg.size = round_page((paddr_t)fdt + fdt_get_size(fdt)) - reg.addr;
	memreg_remove(&reg);

#ifdef DDB
	/* Load symbols from initrd. */
	db_machine_init();
	if (initrd_reg.size != 0)
		memreg_remove(&initrd_reg);
	ssym = (caddr_t)initrd_reg.addr;
	esym = ssym + initrd_reg.size;
#endif

	pmap_bootstrap();
	uvm_setpagesize();

	for (i = 0; i < nmemreg; i++) {
		paddr_t start = memreg[i].addr;
		paddr_t end = start + memreg[i].size;

		uvm_page_physload(atop(start), atop(end),
		    atop(start), atop(end), 0);
	}

	/* Enable translation. */
	msr = mfmsr();
	mtmsr(msr | (PSL_DR|PSL_IR));
	isync();

	initmsgbuf((caddr_t)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);

	proc0paddr = (struct user *)initstack;
	proc0.p_addr = proc0paddr;
	curpcb = &proc0.p_addr->u_pcb;
	uspace = (register_t)proc0paddr + USPACE - FRAMELEN;
	proc0.p_md.md_regs = (struct trapframe *)uspace;
}

void
memreg_add(const struct fdt_reg *reg)
{
	memreg[nmemreg++] = *reg;
}

void
memreg_remove(const struct fdt_reg *reg)
{
	uint64_t start = reg->addr;
	uint64_t end = reg->addr + reg->size;
	int i, j;

	for (i = 0; i < nmemreg; i++) {
		uint64_t memstart = memreg[i].addr;
		uint64_t memend = memreg[i].addr + memreg[i].size;

		if (end <= memstart)
			continue;
		if (start >= memend)
			continue;

		if (start <= memstart)
			memstart = MIN(end, memend);
		if (end >= memend)
			memend = MAX(start, memstart);

		if (start > memstart && end < memend) {
			if (nmemreg < nitems(memreg)) {
				memreg[nmemreg].addr = end;
				memreg[nmemreg].size = memend - end;
				nmemreg++;
			}
			memend = start;
		}
		memreg[i].addr = memstart;
		memreg[i].size = memend - memstart;
	}

	/* Remove empty slots. */
	for (i = nmemreg - 1; i >= 0; i--) {
		if (memreg[i].size == 0) {
			for (j = i; (j + 1) < nmemreg; j++)
				memreg[j] = memreg[j + 1];
			nmemreg--;
		}
	}
}

#define R_PPC64_RELATIVE	22
#define ELF_R_TYPE_RELATIVE	R_PPC64_RELATIVE

/*
 * Disable optimization for this function to prevent clang from
 * generating jump tables that need relocation.
 */
__attribute__((optnone)) void
self_reloc(Elf_Dyn *dynamic, Elf_Addr base)
{
	Elf_Word relasz = 0, relaent = sizeof(Elf_RelA);
	Elf_RelA *rela = NULL;
	Elf_Addr *addr;
	Elf_Dyn *dynp;

	for (dynp = dynamic; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_RELA:
			rela = (Elf_RelA *)(dynp->d_un.d_ptr + base);
			break;
		case DT_RELASZ:
			relasz = dynp->d_un.d_val;
			break;
		case DT_RELAENT:
			relaent = dynp->d_un.d_val;
			break;
		}
	}

	while (relasz > 0) {
		switch (ELF_R_TYPE(rela->r_info)) {
		case ELF_R_TYPE_RELATIVE:
			addr = (Elf_Addr *)(base + rela->r_offset);
			*addr = base + rela->r_addend;
			break;
		}
		rela = (Elf_RelA *)((caddr_t)rela + relaent);
		relasz -= relaent;
	}
}

void *
opal_phys(void *va)
{
	paddr_t pa;

	pmap_extract(pmap_kernel(), (vaddr_t)va, &pa);
	return (void *)pa;
}

void
opal_printf(const char *fmt, ...)
{
	static char buf[256];
	uint64_t len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, ap);
	if (len == (uint64_t)-1)
		len = 0;
	else if (len >= sizeof(buf))
		len = sizeof(buf) - 1;
	va_end(ap);

	opal_console_write(0, opal_phys(&len), opal_phys(buf));
}

void
opal_cnprobe(struct consdev *cd)
{
}

void
opal_cninit(struct consdev *cd)
{
}

int
opal_cngetc(dev_t dev)
{
	uint64_t len;
	char ch;

	for (;;) {
		len = 1;
		opal_console_read(0, opal_phys(&len), opal_phys(&ch));
		if (len)
			return ch;
		opal_poll_events(NULL);
	}
}

void
opal_cnputc(dev_t dev, int c)
{
	uint64_t len = 1;
	char ch = c;
	int64_t error;

	opal_console_write(0, opal_phys(&len), opal_phys(&ch));
	while (1) {
		error = opal_console_flush(0);
		if (error != OPAL_BUSY && error != OPAL_PARTIAL)
			break;
		delay(1);
	}
}

void
opal_cnpollc(dev_t dev, int on)
{
}

struct consdev opal_consdev = {
	.cn_probe = opal_cnprobe,
	.cn_init = opal_cninit,
	.cn_getc = opal_cngetc,
	.cn_putc = opal_cnputc,
	.cn_pollc = opal_cnpollc,
};

struct consdev *cn_tab = &opal_consdev;

int
copyin(const void *uaddr, void *kaddr, size_t len)
{
	pmap_t pm = curproc->p_vmspace->vm_map.pmap;
	vaddr_t kva;
	vsize_t klen;
	int error;

	while (len > 0) {
		error = pmap_set_user_slb(pm, (vaddr_t)uaddr, &kva, &klen);
		if (error)
			return error;
		if (klen > len)
			klen = len;
		error = kcopy((const void *)kva, kaddr, klen);
		pmap_unset_user_slb();
		if (error)
			return error;

		uaddr = (const char *)uaddr + klen;
		kaddr = (char *)kaddr + klen;
		len -= klen;
	}

	return 0;
}

int
copyin32(const uint32_t *uaddr, uint32_t *kaddr)
{
	return copyin(uaddr, kaddr, sizeof(uint32_t));
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{
	pmap_t pm = curproc->p_vmspace->vm_map.pmap;
	vaddr_t kva;
	vsize_t klen;
	int error;

	while (len > 0) {
		error = pmap_set_user_slb(pm, (vaddr_t)uaddr, &kva, &klen);
		if (error)
			return error;
		if (klen > len)
			klen = len;
		error = kcopy(kaddr, (void *)kva, klen);
		pmap_unset_user_slb();
		if (error)
			return error;

		kaddr = (const char *)kaddr + klen;
		uaddr = (char *)uaddr + klen;
		len -= klen;
	}

	return 0;
}

int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{
	pmap_t pm = curproc->p_vmspace->vm_map.pmap;
	vaddr_t kva;
	vsize_t klen;
	size_t count, total;
	int error = 0;

	if (len == 0)
		return ENAMETOOLONG;

	total = 0;
	while (len > 0) {
		error = pmap_set_user_slb(pm, (vaddr_t)uaddr, &kva, &klen);
		if (error)
			goto out;
		if (klen > len)
			klen = len;
		error = copystr((const void *)kva, kaddr, klen, &count);
		total += count;
		pmap_unset_user_slb();
		if (error == 0 || error == EFAULT)
			goto out;

		uaddr = (const char *)uaddr + klen;
		kaddr = (char *)kaddr + klen;
		len -= klen;
	}

out:
	if (done)
		*done = total;
	return error;
}

int
copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
{
	pmap_t pm = curproc->p_vmspace->vm_map.pmap;
	vaddr_t kva;
	vsize_t klen;
	size_t count, total;
	int error = 0;

	if (len == 0)
		return ENAMETOOLONG;

	total = 0;
	while (len > 0) {
		error = pmap_set_user_slb(pm, (vaddr_t)uaddr, &kva, &klen);
		if (error)
			goto out;
		if (klen > len)
			klen = len;
		error = copystr(kaddr, (void *)kva, klen, &count);
		total += count;
		pmap_unset_user_slb();
		if (error == 0 || error == EFAULT)
			goto out;

		kaddr = (const char *)kaddr + klen;
		uaddr = (char *)uaddr + klen;
		len -= klen;
	}

out:
	if (done)
		*done = total;
	return error;
}

void
need_resched(struct cpu_info *ci)
{
	ci->ci_want_resched = 1;

	/* There's a risk we'll be called before the idle threads start */
	if (ci->ci_curproc) {
		aston(ci->ci_curproc);
		cpu_kick(ci);
	}
}

void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr, va;
	paddr_t pa, epa;
	void *fdt;
	void *node;
	char *prop;
	int len;

	printf("%s", version);

	printf("real mem = %lu (%luMB)\n", ptoa(physmem),
	    ptoa(physmem)/1024/1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/* Remap the FDT. */
	pa = trunc_page(fdt_pa);
	epa = round_page(fdt_pa + fdt_size);
	va = (vaddr_t)km_alloc(epa - pa, &kv_any, &kp_none, &kd_waitok);
	fdt = (void *)(va + (fdt_pa & PAGE_MASK));
	while (pa < epa) {
		pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	if (!fdt_init(fdt) || fdt_get_size(fdt) == 0)
		panic("can't remap FDT");

	intr_init();

	node = fdt_find_node("/chosen");
	if (node) {
		len = fdt_node_property(node, "bootargs", &prop);
		if (len > 0)
			parse_bootargs(prop);

		len = fdt_node_property(node, "openbsd,boothowto", &prop);
		if (len == sizeof(boothowto))
			boothowto = bemtoh32((uint32_t *)prop);

		len = fdt_node_property(node, "openbsd,bootduid", &prop);
		if (len == sizeof(bootduid))
			memcpy(bootduid, prop, sizeof(bootduid));
	}

	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}

void
parse_bootargs(const char *bootargs)
{
	const char *cp = bootargs;

	if (strncmp(cp, "bootduid=", strlen("bootduid=")) == 0)
		cp = parse_bootduid(cp + strlen("bootduid="));

	while (*cp != '-')
		if (*cp++ == '\0')
			return;
	cp++;

	while (*cp != 0) {
		switch(*cp) {
		case 'a':
			boothowto |= RB_ASKNAME;
			break;
		case 'c':
			boothowto |= RB_CONFIG;
			break;
		case 'd':
			boothowto |= RB_KDB;
			break;
		case 's':
			boothowto |= RB_SINGLE;
			break;
		default:
			printf("unknown option `%c'\n", *cp);
			break;
		}
		cp++;
	}
}

const char *
parse_bootduid(const char *bootarg)
{
	const char *cp = bootarg;
	uint64_t duid = 0;
	int digit, count = 0;

	while (count < 16) {
		if (*cp >= '0' && *cp <= '9')
			digit = *cp - '0';
		else if (*cp >= 'a' && *cp <= 'f')
			digit = *cp - 'a' + 10;
		else
			break;
		duid *= 16;
		duid += digit;
		count++;
		cp++;
	}

	if (count > 0) {
		memcpy(&bootduid, &duid, sizeof(bootduid));
		return cp;
	}

	return bootarg;
}

#define PSL_USER \
    (PSL_SF | PSL_HV | PSL_EE | PSL_PR | PSL_ME | PSL_IR | PSL_DR | PSL_RI)

void
setregs(struct proc *p, struct exec_package *pack, u_long stack,
    register_t *retval)
{
	struct trapframe *frame = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct ps_strings arginfo;

	copyin((void *)p->p_p->ps_strings, &arginfo, sizeof(arginfo));

	memset(frame, 0, sizeof(*frame));
	frame->fixreg[1] = stack;
	frame->fixreg[3] = retval[0] = arginfo.ps_nargvstr;
	frame->fixreg[4] = retval[1] = (register_t)arginfo.ps_argvstr;
	frame->fixreg[5] = (register_t)arginfo.ps_envstr;
	frame->fixreg[6] = (register_t)pack->ep_emul_argp;
	frame->fixreg[12] = pack->ep_entry;
	frame->srr0 = pack->ep_entry;
	frame->srr1 = PSL_USER;

	memset(&pcb->pcb_fpstate, 0, sizeof(pcb->pcb_fpstate));
	pcb->pcb_flags = 0;
}

void
sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip)
{
	struct proc *p = curproc;
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct trapframe *tf = p->p_md.md_regs;
	struct sigframe *fp, frame;
	struct sigacts *psp = p->p_p->ps_sigacts;
	siginfo_t *sip = NULL;
	int i;

	/* Allocate space for the signal handler context. */
	if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 &&
	    !sigonstack(tf->fixreg[1]) && (psp->ps_sigonstack & sigmask(sig)))
		fp = (struct sigframe *)
		    trunc_page((vaddr_t)p->p_sigstk.ss_sp + p->p_sigstk.ss_size);
	else
		fp = (struct sigframe *)tf->fixreg[1];

	fp = (struct sigframe *)(STACKALIGN(fp - 1) - 288);

	/* Save FPU state to PCB if necessary. */
	if (pcb->pcb_flags & (PCB_FP|PCB_VEC|PCB_VSX) &&
	    tf->srr1 & (PSL_FP|PSL_VEC|PSL_VSX)) {
		tf->srr1 &= ~(PSL_FP|PSL_VEC|PSL_VSX);
		save_vsx(p);
	}

	/* Build stack frame for signal trampoline. */
	memset(&frame, 0, sizeof(frame));
	frame.sf_signum = sig;

	/* Save register context. */
	for (i = 0; i < 32; i++)
		frame.sf_sc.sc_reg[i] = tf->fixreg[i];
	frame.sf_sc.sc_lr = tf->lr;
	frame.sf_sc.sc_cr = tf->cr;
	frame.sf_sc.sc_xer = tf->xer;
	frame.sf_sc.sc_ctr = tf->ctr;
	frame.sf_sc.sc_pc = tf->srr0;
	frame.sf_sc.sc_ps = tf->srr1;
	frame.sf_sc.sc_vrsave = tf->vrsave;

	/* Copy the saved FPU state into the frame if necessary. */
	if (pcb->pcb_flags & (PCB_FP|PCB_VEC|PCB_VSX)) {
		memcpy(frame.sf_sc.sc_vsx, pcb->pcb_fpstate.fp_vsx,
		    sizeof(pcb->pcb_fpstate.fp_vsx));
		frame.sf_sc.sc_fpscr = pcb->pcb_fpstate.fp_fpscr;
		frame.sf_sc.sc_vscr = pcb->pcb_fpstate.fp_vscr;
	}

	/* Save signal mask. */
	frame.sf_sc.sc_mask = mask;

	if (psp->ps_siginfo & sigmask(sig)) {
		sip = &fp->sf_si;
		frame.sf_si = *ksip;
	}

	frame.sf_sc.sc_cookie = (long)&fp->sf_sc ^ p->p_p->ps_sigcookie;
	if (copyout(&frame, fp, sizeof(frame)))
		sigexit(p, SIGILL);

	/*
	 * Build context to run handler in.
	 */
	tf->fixreg[1] = (register_t)fp;
	tf->fixreg[3] = sig;
	tf->fixreg[4] = (register_t)sip;
	tf->fixreg[5] = (register_t)&fp->sf_sc;
	tf->fixreg[12] = (register_t)catcher;

	tf->srr0 = p->p_p->ps_sigcode;
}

int
sys_sigreturn(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext ksc, *scp = SCARG(uap, sigcntxp);
	struct trapframe *tf = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;
	int error;
	int i;

	if (PROC_PC(p) != p->p_p->ps_sigcoderet) {
		sigexit(p, SIGILL);
		return EPERM;
	}

	if ((error = copyin(scp, &ksc, sizeof ksc)))
		return error;

	if (ksc.sc_cookie != ((long)scp ^ p->p_p->ps_sigcookie)) {
		sigexit(p, SIGILL);
		return EFAULT;
	}

	/* Prevent reuse of the sigcontext cookie */
	ksc.sc_cookie = 0;
	(void)copyout(&ksc.sc_cookie, (caddr_t)scp +
	    offsetof(struct sigcontext, sc_cookie), sizeof (ksc.sc_cookie));

	/* Make sure the processor mode has not been tampered with. */
	if (ksc.sc_ps != PSL_USER)
		return EINVAL;

	/* Restore register context. */
	for (i = 0; i < 32; i++)
		tf->fixreg[i] = ksc.sc_reg[i];
	tf->lr = ksc.sc_lr;
	tf->cr = ksc.sc_cr;
	tf->xer = ksc.sc_xer;
	tf->ctr = ksc.sc_ctr;
	tf->srr0 = ksc.sc_pc;
	tf->srr1 = ksc.sc_ps;
	tf->vrsave = ksc.sc_vrsave;

	/* Write saved FPU state back to PCB if necessary. */
	if (pcb->pcb_flags & (PCB_FP|PCB_VEC|PCB_VSX)) {
		memcpy(pcb->pcb_fpstate.fp_vsx, ksc.sc_vsx,
		    sizeof(pcb->pcb_fpstate.fp_vsx));
		pcb->pcb_fpstate.fp_fpscr = ksc.sc_fpscr;
		pcb->pcb_fpstate.fp_vscr = ksc.sc_vscr;
	}

	/* Restore signal mask. */
	p->p_sigmask = ksc.sc_mask & ~sigcantmask;

	return EJUSTRETURN;
}

void cpu_switchto_asm(struct proc *, struct proc *);

void
cpu_switchto(struct proc *old, struct proc *new)
{
	if (old) {
		struct pcb *pcb = &old->p_addr->u_pcb;
		struct trapframe *tf = old->p_md.md_regs;

		if (pcb->pcb_flags & (PCB_FP|PCB_VEC|PCB_VSX) &&
		    tf->srr1 & (PSL_FP|PSL_VEC|PSL_VSX)) {
			tf->srr1 &= ~(PSL_FP|PSL_VEC|PSL_VSX);
			save_vsx(old);
		}
	}

	cpu_switchto_asm(old, new);
}

/*
 * machine dependent system variables.
 */

int
cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	int altivec = 1;	/* Altivec is always supported */

	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return ENOTDIR;		/* overloaded */

	switch (name[0]) {
	case CPU_ALTIVEC:
		return (sysctl_rdint(oldp, oldlenp, newp, altivec));
	default:
		return EOPNOTSUPP;
	}
	/* NOTREACHED */
}

void
consinit(void)
{
}

void
opal_powerdown(void)
{
	int64_t error;

	do {
		error = opal_cec_power_down(0);
		if (error == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
	} while (error == OPAL_BUSY || error == OPAL_BUSY_EVENT);

	if (error != OPAL_SUCCESS)
		return;

	/* Wait for the actual powerdown to happen. */
	for (;;)
		opal_poll_events(NULL);
}

int waittime = -1;

__dead void
boot(int howto)
{
	if ((howto & RB_RESET) != 0)
		goto doreset;

	if (cold) {
		if ((howto & RB_USERREQ) == 0)
			howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown(curproc);

		if ((howto & RB_TIMEBAD) == 0) {
			resettodr();
		} else {
			printf("WARNING: not updating battery clock\n");
		}
	}
	if_downall();

	uvm_shutdown();
	splhigh();
	cold = 1;

haltsys:
	config_suspend_all(DVACT_POWERDOWN);

	if ((howto & RB_HALT) != 0) {
		if ((howto & RB_POWERDOWN) != 0)
			opal_powerdown();

		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

doreset:
	printf("rebooting...\n");
	opal_cec_reboot();

	for (;;)
		continue;
	/* NOTREACHED */
}