/*-
 * Copyright (c) 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_shutdown.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_shutdown.c,v 1.72.2.12 2002/02/21 19:15:10 dillon Exp $
 */

#include "opt_ddb.h"
#include "opt_ddb_trace.h"
#include "opt_panic.h"
#include "opt_show_busybufs.h"
#include "use_gpio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/buf.h>
#include <sys/disk.h>
#include <sys/diskslice.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/fcntl.h>		/* FREAD */
#include <sys/stat.h>		/* S_IFCHR */
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/conf.h>
#include <sys/sysproto.h>
#include <sys/device.h>
#include <sys/cons.h>
#include <sys/shm.h>
#include <sys/kern_syscall.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <sys/thread2.h>
#include <sys/buf2.h>
#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/smp.h>	/* smp_active_mask, cpuid */
#include <machine/vmparam.h>
#include <machine/thread.h>

#include <sys/signalvar.h>

#include <sys/wdog.h>
#include <dev/misc/gpio/gpio.h>

#ifndef PANIC_REBOOT_WAIT_TIME
#define PANIC_REBOOT_WAIT_TIME 15	/* default to 15 seconds */
#endif

/*
 * Note that stdarg.h and the ANSI style va_start macro is used for both
 * ANSI and traditional C compilers.  We use the machine version to stay
 * within the confines of the kernel header files.
 */
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#ifdef DDB_UNATTENDED
int debugger_on_panic = 0;
#else
int debugger_on_panic = 1;
#endif
SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic, CTLFLAG_RW,
	&debugger_on_panic, 0, "Run debugger on kernel panic");

#ifdef DDB_TRACE
int trace_on_panic = 1;
#else
int trace_on_panic = 0;
#endif
SYSCTL_INT(_debug, OID_AUTO, trace_on_panic, CTLFLAG_RW,
	&trace_on_panic, 0, "Print stack trace on kernel panic");
#endif

static int sync_on_panic = 0;
SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RW,
	&sync_on_panic, 0, "Do a sync before rebooting from a panic");

SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0, "Shutdown environment");

/*
 * Variable panicstr contains argument to first call to panic; used as flag
 * to indicate that the kernel has already called panic.
 */
const char *panicstr;
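
/*
 * Note: throughout this file the test (panicstr == NULL) distinguishes an
 * orderly shutdown from one triggered by panic().  Post-panic paths skip
 * work that could recurse or wedge, e.g. process cleanup, unmounting, and
 * stopping kernel processes.
 */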
int dumping;				/* system is dumping */
static struct dumperinfo dumper;	/* selected dumper */

globaldata_t panic_cpu_gd;		/* which cpu took the panic */
struct lwkt_tokref panic_tokens[LWKT_MAXTOKENS];
int panic_tokens_count;

int bootverbose = 0;		/* note: assignment to force non-bss */
SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW,
	&bootverbose, 0, "Verbose kernel messages");

int cold = 1;			/* note: assignment to force non-bss */
int dumplo;			/* OBSOLETE - savecore compat */
u_int64_t dumplo64;

static void boot (int) __dead2;
static int setdumpdev (cdev_t dev);
static void poweroff_wait (void *, int);
static void print_uptime (void);
static void shutdown_halt (void *junk, int howto);
static void shutdown_panic (void *junk, int howto);
static void shutdown_reset (void *junk, int howto);
static int shutdown_busycount1(struct buf *bp, void *info);
static int shutdown_busycount2(struct buf *bp, void *info);
static void shutdown_cleanup_proc(struct proc *p);

/* register various local shutdown events */
static void
shutdown_conf(void *unused)
{
	EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL,
			      SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL,
			      SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL,
			      SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_reset, NULL,
			      SHUTDOWN_PRI_LAST + 200);
}

SYSINIT(shutdown_conf, SI_BOOT2_MACHDEP, SI_ORDER_ANY, shutdown_conf, NULL)

/* ARGSUSED */

/*
 * The system call that results in a reboot.
 *
 * MPALMOSTSAFE
 */
int
sys_reboot(struct reboot_args *uap)
{
	struct thread *td = curthread;
	int error;

	if ((error = priv_check(td, PRIV_REBOOT)))
		return (error);

	get_mplock();
	boot(uap->opt);
	rel_mplock();
	return (0);
}

/*
 * Called by events that want to shut down, e.g. <CTL><ALT><DEL> on a PC.
 */
static int shutdown_howto = 0;

void
shutdown_nice(int howto)
{
	shutdown_howto = howto;

	/* Send a signal to init(8) and have it shut down the world */
	if (initproc != NULL) {
		ksignal(initproc, SIGINT);
	} else {
		/* No init(8) running, so simply reboot */
		boot(RB_NOSYNC);
	}
	return;
}

static int waittime = -1;
struct pcb dumppcb;
struct thread *dumpthread;

static void
print_uptime(void)
{
	int f;
	struct timespec ts;

	getnanouptime(&ts);
	kprintf("Uptime: ");
	f = 0;
	if (ts.tv_sec >= 86400) {
		kprintf("%ldd", ts.tv_sec / 86400);
		ts.tv_sec %= 86400;
		f = 1;
	}
	if (f || ts.tv_sec >= 3600) {
		kprintf("%ldh", ts.tv_sec / 3600);
		ts.tv_sec %= 3600;
		f = 1;
	}
	if (f || ts.tv_sec >= 60) {
		kprintf("%ldm", ts.tv_sec / 60);
		ts.tv_sec %= 60;
		f = 1;
	}
	kprintf("%lds\n", ts.tv_sec);
}
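
/*
 * Overview of the shutdown sequence implemented by boot() below
 * (a summary for the reader, not normative):
 *
 *	1. switch to the BSP (cpu0) unless panicking
 *	2. EVENTHANDLER_INVOKE(shutdown_pre_sync, howto)
 *	3. release process/vfs references, sync and retire dirty buffers
 *	4. dumpsys() if RB_DUMP was requested and RB_HALT was not
 *	5. EVENTHANDLER_INVOKE(shutdown_post_sync, howto)
 *	6. EVENTHANDLER_INVOKE(shutdown_final, howto), whose handlers
 *	   (registered in shutdown_conf() above) halt, pause, or reset.
 */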
/*
 * Go through the rigmarole of shutting down...
 * This used to be in machdep.c but I'll be damned if I could see
 * anything machine dependent in it.
 */
static void
boot(int howto)
{
	/*
	 * Get rid of any user scheduler baggage and then give
	 * us a high priority.
	 */
	if (curthread->td_release)
		curthread->td_release(curthread);
	lwkt_setpri_self(TDPRI_MAX);

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

	/*
	 * We really want to shutdown on the BSP.  Subsystems such as ACPI
	 * can't power-down the box otherwise.
	 */
	if (smp_active_mask > 1) {
		kprintf("boot() called on cpu#%d\n", mycpu->gd_cpuid);
	}
	if (panicstr == NULL && mycpu->gd_cpuid != 0) {
		kprintf("Switching to cpu #0 for shutdown\n");
		lwkt_setcpu_self(globaldata_find(0));
	}

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Try to get rid of any remaining FS references.  The calling
	 * process, proc0, and init may still hold references.  The
	 * VFS cache subsystem may still hold a root reference to root.
	 *
	 * XXX this needs work.  We really need to SIGSTOP all remaining
	 * processes in order to avoid blowups due to proc0's filesystem
	 * references going away.  For now just make sure that the init
	 * process is stopped.
	 */
	if (panicstr == NULL) {
		shutdown_cleanup_proc(curproc);
		shutdown_cleanup_proc(&proc0);
		if (initproc) {
			if (initproc != curproc) {
				ksignal(initproc, SIGSTOP);
				tsleep(boot, 0, "shutdn", hz / 20);
			}
			shutdown_cleanup_proc(initproc);
		}
		vfs_cache_setroot(NULL, NULL);
	}

	/*
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		int iter, nbusy, pbusy;

		waittime = 0;
		kprintf("\nsyncing disks... ");

		sys_sync(NULL);	/* YYY was sync(&proc0, NULL). why proc0 ? */

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = scan_all_buffers(shutdown_busycount1, NULL);
			if (nbusy == 0)
				break;
			kprintf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;
			/*
			 * XXX:
			 * Process soft update work queue if buffers don't sync
			 * after 6 iterations by permitting the syncer to run.
			 */
			if (iter > 5)
				bio_ops_sync(NULL);

			sys_sync(NULL); /* YYY was sync(&proc0, NULL). why proc0 ? */
			tsleep(boot, 0, "shutdn", hz * iter / 20 + 1);
		}
		kprintf("\n");

		/*
		 * Count only busy local buffers to prevent forcing
		 * a fsck if we're just a client of a wedged NFS server
		 */
		nbusy = scan_all_buffers(shutdown_busycount2, NULL);
		if (nbusy) {
			/*
			 * Failed to sync all blocks.  Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			kprintf("giving up on %d buffers\n", nbusy);
#ifdef DDB
			if (debugger_on_panic)
				Debugger("busy buffer problem");
#endif /* DDB */
			tsleep(boot, 0, "shutdn", hz * 5 + 1);
		} else {
			kprintf("done\n");
			/*
			 * Unmount filesystems
			 */
			if (panicstr == NULL)
				vfs_unmountall();
		}
		tsleep(boot, 0, "shutdn", hz / 10 + 1);
	}

	print_uptime();

	/*
	 * Dump before doing post_sync shutdown ops
	 */
	crit_enter();
	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold) {
		dumpsys();
	}

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.  This will also call the device shutdown
	 * methods.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for (;;)
		;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
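
/*
 * Buffer-scan callbacks for the sync loop above.  A buffer counts as
 * "busy" if it is still locked (BUF_REFCNT() > 0) or marked delayed-write
 * (B_DELWRI) without having been invalidated.  Pass 1 is the count used
 * while retrying the sync; pass 2 decides whether to give up and skip
 * vfs_unmountall().
 */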
/*
 * Pass 1 - Figure out if there are any busy or dirty buffers still present.
 *
 * We ignore TMPFS mounts in this pass.
 */
static int
shutdown_busycount1(struct buf *bp, void *info)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL && vp->v_tag == VT_TMPFS)
		return (0);
	if ((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp) > 0)
		return (1);
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
		return (1);
	return (0);
}

/*
 * Pass 2 - only run after pass 1 has completed or given up.
 *
 * We ignore TMPFS, NFS, MFS, and SMBFS mounts in this pass.
 */
static int
shutdown_busycount2(struct buf *bp, void *info)
{
	struct vnode *vp;

	/*
	 * Ignore tmpfs, nfs, mfs, and smbfs mounts
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_tag == VT_TMPFS)
			return (0);
		if (vp->v_tag == VT_NFS)
			return (0);
		if (vp->v_tag == VT_MFS)
			return (0);
		if (vp->v_tag == VT_SMBFS)
			return (0);
	}

	/*
	 * Only count buffers stuck on I/O, ignore everything else
	 */
	if (((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp)) ||
	    ((bp->b_flags & (B_DELWRI|B_INVAL)) == B_DELWRI)) {
		/*
		 * Only count buffers undergoing write I/O
		 * on the related vnode.
		 */
		if (bp->b_vp == NULL ||
		    bio_track_active(&bp->b_vp->v_track_write) == 0) {
			return (0);
		}
#if defined(SHOW_BUSYBUFS) || defined(DIAGNOSTIC)
		kprintf("%p dev:?, flags:%08x, loffset:%jd, doffset:%jd\n",
			bp, bp->b_flags, (intmax_t)bp->b_loffset,
			(intmax_t)bp->b_bio2.bio_offset);
#endif
		return (1);
	}
	return (0);
}

/*
 * If the shutdown was a clean halt, behave accordingly.
 */
static void
shutdown_halt(void *junk, int howto)
{
	if (howto & RB_HALT) {
		kprintf("\n");
		kprintf("The operating system has halted.\n");
#ifdef _KERNEL_VIRTUAL
		cpu_halt();
#else
		kprintf("Please press any key to reboot.\n\n");
		switch (cngetc()) {
		case -1:	/* No console, just die */
			cpu_halt();
			/* NOTREACHED */
		default:
			howto &= ~RB_HALT;
			break;
		}
#endif
	}
}
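
/*
 * PANIC_REBOOT_WAIT_TIME semantics in shutdown_panic() below (only reached
 * when RB_DUMP is set, i.e. after a panic): a positive value gives an
 * abortable countdown of that many seconds, 0 reboots immediately, and -1
 * waits indefinitely for a console keypress.
 */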
/*
 * Check to see if the system panicked; if so, pause and then reboot
 * according to the specified delay.
 */
static void
shutdown_panic(void *junk, int howto)
{
	int loop;

	if (howto & RB_DUMP) {
		if (PANIC_REBOOT_WAIT_TIME != 0) {
			if (PANIC_REBOOT_WAIT_TIME != -1) {
				kprintf("Automatic reboot in %d seconds - "
					"press a key on the console to abort\n",
					PANIC_REBOOT_WAIT_TIME);
				for (loop = PANIC_REBOOT_WAIT_TIME * 10;
				     loop > 0; --loop) {
					DELAY(1000 * 100); /* 1/10th second */
					/* Did user type a key? */
					if (cncheckc() != -1)
						break;
				}
				if (!loop)
					return;
			}
		} else {	/* zero time specified - reboot NOW */
			return;
		}
		kprintf("--> Press a key on the console to reboot,\n");
		kprintf("--> or switch off the system now.\n");
		cngetc();
	}
}

/*
 * Everything done, now reset.
 */
static void
shutdown_reset(void *junk, int howto)
{
	kprintf("Rebooting...\n");
	DELAY(1000000);	/* wait 1 sec for kprintf's to complete and be read */
	/* cpu_boot(howto); */ /* doesn't do anything at the moment */
	cpu_reset();
	/* NOTREACHED */ /* assuming reset worked */
}

/*
 * Try to remove FS references in the specified process.  This function
 * is used during shutdown.
 */
static
void
shutdown_cleanup_proc(struct proc *p)
{
	struct filedesc *fdp;
	struct vmspace *vm;

	if (p == NULL)
		return;
	if ((fdp = p->p_fd) != NULL) {
		kern_closefrom(0);
		if (fdp->fd_cdir) {
			cache_drop(&fdp->fd_ncdir);
			vrele(fdp->fd_cdir);
			fdp->fd_cdir = NULL;
		}
		if (fdp->fd_rdir) {
			cache_drop(&fdp->fd_nrdir);
			vrele(fdp->fd_rdir);
			fdp->fd_rdir = NULL;
		}
		if (fdp->fd_jdir) {
			cache_drop(&fdp->fd_njdir);
			vrele(fdp->fd_jdir);
			fdp->fd_jdir = NULL;
		}
	}
	if (p->p_vkernel)
		vkernel_exit(p);
	if (p->p_textvp) {
		vrele(p->p_textvp);
		p->p_textvp = NULL;
	}
	vm = p->p_vmspace;
	if (vm != NULL) {
		pmap_remove_pages(vmspace_pmap(vm),
				  VM_MIN_USER_ADDRESS,
				  VM_MAX_USER_ADDRESS);
		vm_map_remove(&vm->vm_map,
			      VM_MIN_USER_ADDRESS,
			      VM_MAX_USER_ADDRESS);
	}
}

/*
 * Magic number for savecore.
 *
 * Exported (symorder) and used at least by savecore(8).
 *
 * Mark it as used so that gcc doesn't optimize it away.
 */
__attribute__((__used__))
	static u_long const dumpmag = 0x8fca0101UL;

__attribute__((__used__))
	static int dumpsize = 0;	/* also for savecore */

static int dodump = 1;

SYSCTL_INT(_machdep, OID_AUTO, do_dump, CTLFLAG_RW, &dodump, 0,
	"Try to perform coredump on kernel panic");

void
mkdumpheader(struct kerneldumpheader *kdh, char *magic, uint32_t archver,
	     uint64_t dumplen, uint32_t blksz)
{
	bzero(kdh, sizeof(*kdh));
	strncpy(kdh->magic, magic, sizeof(kdh->magic));
	strncpy(kdh->architecture, MACHINE_ARCH, sizeof(kdh->architecture));
	kdh->version = htod32(KERNELDUMPVERSION);
	kdh->architectureversion = htod32(archver);
	kdh->dumplength = htod64(dumplen);
	kdh->dumptime = htod64(time_second);
	kdh->blocksize = htod32(blksz);
	strncpy(kdh->hostname, hostname, sizeof(kdh->hostname));
	strncpy(kdh->versionstring, version, sizeof(kdh->versionstring));
	if (panicstr != NULL)
		strncpy(kdh->panicstring, panicstr, sizeof(kdh->panicstring));
	kdh->parity = kerneldump_parity(kdh);
}
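
/*
 * Note on the header above: numeric fields pass through the htod32()/
 * htod64() (host-to-dump byte order) macros, and kerneldump_parity()
 * folds the header into a checksum word (a simple XOR over the header
 * words in the usual kerneldump convention) that savecore(8) can verify
 * before trusting the dump.
 */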
static int
setdumpdev(cdev_t dev)
{
	int error;
	int doopen;

	if (dev == NULL) {
		disk_dumpconf(NULL, 0/*off*/);
		return (0);
	}

	/*
	 * We have to open the device before we can perform ioctls on it,
	 * or the slice/label data may not be present.  Device opens are
	 * usually tracked by specfs, but the dump device can be set in
	 * early boot and may not be open, so this is somewhat of a hack.
	 */
	doopen = (dev->si_sysref.refcnt == 1);
	if (doopen) {
		error = dev_dopen(dev, FREAD, S_IFCHR, proc0.p_ucred);
		if (error)
			return (error);
	}
	error = disk_dumpconf(dev, 1/*on*/);

	return error;
}

/* ARGSUSED */
static void dump_conf (void *dummy);
static void
dump_conf(void *dummy)
{
	char *path;
	cdev_t dev;
	int _dummy;

	path = kmalloc(MNAMELEN, M_TEMP, M_WAITOK);
	if (TUNABLE_STR_FETCH("dumpdev", path, MNAMELEN) != 0) {
		/*
		 * Make sure all disk devices created so far have also been
		 * probed, and also make sure that the newly created device
		 * nodes for probed disks are ready, too.
		 *
		 * XXX - Delay an additional 2 seconds to help drivers which
		 *	 pick up devices asynchronously and are not caught by
		 *	 CAM's initial probe.
		 */
		sync_devs();
		tsleep(&_dummy, 0, "syncer", hz*2);

		dev = kgetdiskbyname(path);
		if (dev != NULL)
			dumpdev = dev;
	}
	kfree(path, M_TEMP);
	if (setdumpdev(dumpdev) != 0)
		dumpdev = NULL;
}

SYSINIT(dump_conf, SI_SUB_DUMP_CONF, SI_ORDER_FIRST, dump_conf, NULL)

static int
sysctl_kern_dumpdev(SYSCTL_HANDLER_ARGS)
{
	int error;
	udev_t ndumpdev;

	ndumpdev = dev2udev(dumpdev);
	error = sysctl_handle_opaque(oidp, &ndumpdev, sizeof ndumpdev, req);
	if (error == 0 && req->newptr != NULL)
		error = setdumpdev(udev2dev(ndumpdev, 0));
	return (error);
}

SYSCTL_PROC(_kern, KERN_DUMPDEV, dumpdev, CTLTYPE_OPAQUE|CTLFLAG_RW,
	0, sizeof dumpdev, sysctl_kern_dumpdev, "T,udev_t", "");
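
/*
 * Notes on panic() below: on SMP the first cpu to win the
 * atomic_cmpset_ptr() on panic_cpu_gd becomes the panic cpu; any other
 * cpu that panics afterwards prints a SECONDARY PANIC message and
 * deschedules itself forever, while a reentrant panic on the winning cpu
 * falls through with RB_NOSYNC set to avoid recursive sync attempts.
 */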
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	int bootopt, newpanic;
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	__va_list ap;
	static char buf[256];

	/*
	 * If a panic occurs on multiple cpus before the first is able to
	 * halt the other cpus, only one cpu is allowed to take the panic.
	 * Attempt to be verbose about this situation but if the kprintf()
	 * itself panics don't let us overrun the kernel stack.
	 *
	 * Be very nasty about descheduling our thread at the lowest
	 * level possible in an attempt to freeze the thread without
	 * inducing further panics.
	 *
	 * Bumping gd_trap_nesting_level will also bypass assertions in
	 * lwkt_switch() and allow us to switch away even if we are a
	 * FAST interrupt or IPI.
	 *
	 * The setting of panic_cpu_gd also determines how kprintf()
	 * spin-locks itself.  DDB can set panic_cpu_gd as well.
	 */
	for (;;) {
		globaldata_t xgd = panic_cpu_gd;

		/*
		 * Someone else got the panic cpu
		 */
		if (xgd && xgd != gd) {
			crit_enter();
			++mycpu->gd_trap_nesting_level;
			if (mycpu->gd_trap_nesting_level < 25) {
				kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
					mycpu->gd_cpuid, td);
			}
			td->td_release = NULL;	/* be a grinch */
			for (;;) {
				lwkt_deschedule_self(td);
				lwkt_switch();
			}
			/* NOT REACHED */
			/* --mycpu->gd_trap_nesting_level */
			/* crit_exit() */
		}

		/*
		 * Reentrant panic
		 */
		if (xgd && xgd == gd)
			break;

		/*
		 * We got it
		 */
		if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd))
			break;
	}

	/*
	 * Try to get the system into a working state.  Save information
	 * we are about to destroy.
	 */
	kvcreinitspin();
	if (panicstr == NULL) {
		bcopy(td->td_toks_array, panic_tokens, sizeof(panic_tokens));
		panic_tokens_count = td->td_toks_stop - &td->td_toks_base;
	}
	lwkt_relalltokens(td);
	td->td_toks_stop = &td->td_toks_base;

	/*
	 * Setup
	 */
	bootopt = RB_AUTOBOOT | RB_DUMP;
	if (sync_on_panic == 0)
		bootopt |= RB_NOSYNC;
	newpanic = 0;
	if (panicstr) {
		bootopt |= RB_NOSYNC;
	} else {
		panicstr = fmt;
		newpanic = 1;
	}

	/*
	 * Format the panic string.
	 */
	__va_start(ap, fmt);
	kvsnprintf(buf, sizeof(buf), fmt, ap);
	if (panicstr == fmt)
		panicstr = buf;
	__va_end(ap);
	kprintf("panic: %s\n", buf);
	/* two separate prints in case of an unmapped page and trap */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);

#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
	led_switch("error", 1);
#endif

#if defined(WDOG_DISABLE_ON_PANIC)
	wdog_disable();
#endif

	/*
	 * Enter the debugger or fall through & dump.  Entering the
	 * debugger will stop cpus.  If not entering the debugger stop
	 * cpus here.
	 */
#if defined(DDB)
	if (newpanic && trace_on_panic)
		print_backtrace(-1);
	if (debugger_on_panic)
		Debugger("panic");
	else
#endif
	if (newpanic)
		stop_cpus(mycpu->gd_other_cpus);
	boot(bootopt);
}

/*
 * Support for poweroff delay.
 */
#ifndef POWEROFF_DELAY
# define POWEROFF_DELAY 5000
#endif
static int poweroff_delay = POWEROFF_DELAY;

SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
	&poweroff_delay, 0, "Delay before poweroff (msec)");

static void
poweroff_wait(void *junk, int howto)
{
	if (!(howto & RB_POWEROFF) || poweroff_delay <= 0)
		return;
	DELAY(poweroff_delay * 1000);
}

/*
 * Some system processes (e.g. syncer) need to be stopped at appropriate
 * points in their main loops prior to a system shutdown, so that they
 * won't interfere with the shutdown process (e.g. by holding a disk buf
 * to cause sync to fail).  For each of these system processes, register
 * shutdown_kproc() as a handler for one of the shutdown events.
 */
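
/*
 * A typical registration, sketched from the convention above (the handler
 * argument and priority are illustrative, not taken from this file):
 *
 *	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
 *	    td, SHUTDOWN_PRI_FIRST);
 *
 * where td is the kernel thread to suspend.
 */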
static int kproc_shutdown_wait = 60;
SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
	&kproc_shutdown_wait, 0, "Seconds to wait for a system process to stop");

void
shutdown_kproc(void *arg, int howto)
{
	struct thread *td;
	struct proc *p;
	int error;

	if (panicstr)
		return;

	td = (struct thread *)arg;
	if ((p = td->td_proc) != NULL) {
		kprintf("Waiting (max %d seconds) for system process `%s' "
			"to stop...", kproc_shutdown_wait, p->p_comm);
	} else {
		kprintf("Waiting (max %d seconds) for system thread %s "
			"to stop...", kproc_shutdown_wait, td->td_comm);
	}
	error = suspend_kproc(td, kproc_shutdown_wait * hz);

	if (error == EWOULDBLOCK)
		kprintf("timed out\n");
	else
		kprintf("stopped\n");
}

/* Registration of dumpers */
int
set_dumper(struct dumperinfo *di)
{
	if (di == NULL) {
		bzero(&dumper, sizeof(dumper));
		return 0;
	}

	if (dumper.dumper != NULL)
		return (EBUSY);

	dumper = *di;
	return 0;
}

void
dumpsys(void)
{
#if defined (_KERNEL_VIRTUAL)
	/* VKERNELs don't support dumps */
	kprintf("VKERNEL doesn't support dumps\n");
	return;
#endif
	/*
	 * If there is a dumper registered and we aren't dumping already, call
	 * the machine dependent dumpsys (md_dumpsys) to do the hard work.
	 *
	 * XXX: while right now the md_dumpsys() of x86 and x86_64 could be
	 *	factored out completely into here, I rather keep them machine
	 *	dependent in case we ever add a platform which does not share
	 *	the same dumpsys() code, such as arm.
	 */
	if (dumper.dumper != NULL && !dumping) {
		dumping++;
		md_dumpsys(&dumper);
	}
}

int dump_stop_usertds = 0;

static
void
need_user_resched_remote(void *dummy)
{
	need_user_resched();
}

/*
 * Reactivate the other cpus after a dump: flag user threads to reschedule,
 * poke every cpu with an IPI so the flag is acted upon, then restart the
 * cpus that were stopped when the panic was taken.
 */
void
dump_reactivate_cpus(void)
{
	globaldata_t gd;
	int cpu, seq;

	dump_stop_usertds = 1;

	need_user_resched();

	for (cpu = 0; cpu < ncpus; cpu++) {
		gd = globaldata_find(cpu);
		seq = lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
		lwkt_wait_ipiq(gd, seq);
	}

	restart_cpus(stopped_cpus);
}