/*-
 * Copyright (c) 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_shutdown.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_shutdown.c,v 1.72.2.12 2002/02/21 19:15:10 dillon Exp $
 * $DragonFly: src/sys/kern/kern_shutdown.c,v 1.62 2008/01/05 13:23:48 corecode Exp $
 */

#include "opt_ddb.h"
#include "opt_ddb_trace.h"
#include "opt_hw_wdog.h"
#include "opt_panic.h"
#include "opt_show_busybufs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/buf.h>
#include <sys/diskslice.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/fcntl.h>		/* FREAD	*/
#include <sys/stat.h>		/* S_IFCHR	*/
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/conf.h>
#include <sys/sysproto.h>
#include <sys/device.h>
#include <sys/cons.h>
#include <sys/shm.h>
#include <sys/kern_syscall.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <sys/thread2.h>
#include <sys/buf2.h>

#include <machine/pcb.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/smp.h>		/* smp_active_mask, cpuid */
#include <machine/vmparam.h>

#include <sys/signalvar.h>

#ifndef PANIC_REBOOT_WAIT_TIME
#define PANIC_REBOOT_WAIT_TIME 15 /* default to 15 seconds */
#endif

/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compilers.  We use the machine version to stay
 * within the confines of the kernel header files.
 */
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#ifdef DDB_UNATTENDED
int debugger_on_panic = 0;
#else
int debugger_on_panic = 1;
#endif
SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic, CTLFLAG_RW,
	&debugger_on_panic, 0, "Run debugger on kernel panic");

#ifdef DDB_TRACE
int trace_on_panic = 1;
#else
int trace_on_panic = 0;
#endif
SYSCTL_INT(_debug, OID_AUTO, trace_on_panic, CTLFLAG_RW,
	&trace_on_panic, 0, "Print stack trace on kernel panic");
#endif

static int sync_on_panic = 1;
SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RW,
	&sync_on_panic, 0, "Do a sync before rebooting from a panic");

SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0, "Shutdown environment");

#ifdef	HW_WDOG
/*
 * If there is a hardware watchdog, point this at the function needed to
 * hold it off.  This is needed when the kernel has to perform some lengthy
 * operation, e.g. in wd.c when dumping core.  It's most annoying to have
 * your precious core-dump only half written because the wdog kicked in.
 */
watchdog_tickle_fn wdog_tickler = NULL;
#endif	/* HW_WDOG */

/*
 * The panicstr variable contains the argument to the first call to panic();
 * it is also used as a flag to indicate that the kernel has already panicked.
 */
const char *panicstr;

int dumping;				/* system is dumping */
#ifdef SMP
u_int panic_cpu_interlock;		/* panic interlock */
globaldata_t panic_cpu_gd;		/* which cpu took the panic */
#endif

int bootverbose = 0;			/* note: assignment to force non-bss */
SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW,
	   &bootverbose, 0, "Verbose kernel messages");

int cold = 1;				/* note: assignment to force non-bss */
int dumplo;				/* OBSOLETE - savecore compat */
u_int64_t dumplo64;

static void boot (int) __dead2;
static void dumpsys (void);
static int setdumpdev (cdev_t dev);
static void poweroff_wait (void *, int);
static void print_uptime (void);
static void shutdown_halt (void *junk, int howto);
static void shutdown_panic (void *junk, int howto);
static void shutdown_reset (void *junk, int howto);
static int shutdown_busycount1(struct buf *bp, void *info);
static int shutdown_busycount2(struct buf *bp, void *info);
static void shutdown_cleanup_proc(struct proc *p);

/* register various local shutdown events */
static void
shutdown_conf(void *unused)
{
	EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL, SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL, SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL, SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_reset, NULL, SHUTDOWN_PRI_LAST + 200);
}

SYSINIT(shutdown_conf, SI_BOOT2_MACHDEP, SI_ORDER_ANY, shutdown_conf, NULL)

/* ARGSUSED */

/*
 * The system call that results in a reboot
 */
int
sys_reboot(struct reboot_args *uap)
{
	struct thread *td = curthread;
	int error;

	if ((error = priv_check(td, PRIV_REBOOT)))
		return (error);

	boot(uap->opt);
	return (0);
}

/*
 * Called by events that want to shut down, e.g. <CTL><ALT><DEL> on a PC.
 */
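/* Extra RB_* flags requested via shutdown_nice(); folded into boot()'s howto. */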
static int shutdown_howto = 0;

void
shutdown_nice(int howto)
{
	shutdown_howto = howto;

	/* Send a signal to init(8) and have it shut down the world */
	if (initproc != NULL) {
		ksignal(initproc, SIGINT);
	} else {
		/* No init(8) running, so simply reboot */
		boot(RB_NOSYNC);
	}
	return;
}
static int	waittime = -1;
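/* Context of the thread that initiated the dump, captured via savectx() in dumpsys() */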
static struct thread *dumpthread;
static struct pcb dumppcb;

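/*
 * Print the time elapsed since boot in days, hours, minutes, and seconds.
 */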
static void
print_uptime(void)
{
	int f;
	struct timespec ts;

	getnanouptime(&ts);
	kprintf("Uptime: ");
	f = 0;
	if (ts.tv_sec >= 86400) {
		kprintf("%ldd", ts.tv_sec / 86400);
		ts.tv_sec %= 86400;
		f = 1;
	}
	if (f || ts.tv_sec >= 3600) {
		kprintf("%ldh", ts.tv_sec / 3600);
		ts.tv_sec %= 3600;
		f = 1;
	}
	if (f || ts.tv_sec >= 60) {
		kprintf("%ldm", ts.tv_sec / 60);
		ts.tv_sec %= 60;
		f = 1;
	}
	kprintf("%lds\n", ts.tv_sec);
}

/*
 * Go through the rigmarole of shutting down.  This used to be in machdep.c
 * but I'll be damned if I could see anything machine dependent in it.
 */
static void
boot(int howto)
{
	/*
	 * Get rid of any user scheduler baggage and then give
	 * us a high priority.
	 */
	if (curthread->td_release)
		curthread->td_release(curthread);
	lwkt_setpri_self(TDPRI_MAX);

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

#ifdef SMP
	/*
	 * We really want to shut down on the BSP.  Subsystems such as ACPI
	 * can't power-down the box otherwise.
	 */
	if (smp_active_mask > 1) {
		kprintf("boot() called on cpu#%d\n", mycpu->gd_cpuid);
	}
	if (panicstr == NULL && mycpu->gd_cpuid != 0) {
		kprintf("Switching to cpu #0 for shutdown\n");
		lwkt_setcpu_self(globaldata_find(0));
	}
#endif
	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Try to get rid of any remaining FS references.  The calling
	 * process, proc0, and init may still hold references.  The
	 * VFS cache subsystem may still hold a root reference to root.
	 *
	 * XXX this needs work.  We really need to SIGSTOP all remaining
	 * processes in order to avoid blowups due to proc0's filesystem
	 * references going away.  For now just make sure that the init
	 * process is stopped.
	 */
	if (panicstr == NULL) {
		shutdown_cleanup_proc(curproc);
		shutdown_cleanup_proc(&proc0);
		if (initproc) {
			if (initproc != curproc) {
				ksignal(initproc, SIGSTOP);
				tsleep(boot, 0, "shutdn", hz / 20);
			}
			shutdown_cleanup_proc(initproc);
		}
		vfs_cache_setroot(NULL, NULL);
	}

	/*
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		int iter, nbusy, pbusy;

		waittime = 0;
		kprintf("\nsyncing disks... ");

		sys_sync(NULL);	/* YYY was sync(&proc0, NULL). why proc0 ? */

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = scan_all_buffers(shutdown_busycount1, NULL);
			if (nbusy == 0)
				break;
			kprintf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;
			/*
			 * XXX:
			 * Process soft update work queue if buffers don't sync
			 * after 6 iterations by permitting the syncer to run.
			 */
			if (iter > 5)
				bio_ops_sync(NULL);

			sys_sync(NULL); /* YYY was sync(&proc0, NULL). why proc0 ? */
			tsleep(boot, 0, "shutdn", hz * iter / 20 + 1);
		}
		kprintf("\n");
		/*
		 * Count only busy local buffers to prevent forcing
		 * a fsck if we're just a client of a wedged NFS server
		 */
		nbusy = scan_all_buffers(shutdown_busycount2, NULL);
		if (nbusy) {
			/*
			 * Failed to sync all blocks. Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			kprintf("giving up on %d buffers\n", nbusy);
#ifdef DDB
			Debugger("busy buffer problem");
#endif /* DDB */
			tsleep(boot, 0, "shutdn", hz * 5 + 1);
		} else {
			kprintf("done\n");
			/*
			 * Unmount filesystems
			 */
			if (panicstr == NULL)
				vfs_unmountall();
		}
		tsleep(boot, 0, "shutdn", hz / 10 + 1);
	}

	print_uptime();

	/*
	 * Dump before doing post_sync shutdown ops
	 */
	crit_enter();
	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold)
		dumpsys();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.  This will also call the device shutdown
	 * methods.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}

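/*
 * scan_all_buffers() callback for the sync loop above: report a buffer as
 * busy if it is still referenced or still marked delayed-write.
 */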
static int
shutdown_busycount1(struct buf *bp, void *info)
{
	if ((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp) > 0)
		return(1);
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
		return (1);
	return (0);
}

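/*
 * Stricter check used after the sync loop: a buffer only counts as busy
 * if, in addition to being referenced or dirty, its vnode still has write
 * I/O in progress.
 */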
static int
shutdown_busycount2(struct buf *bp, void *info)
{
	if (((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp)) ||
	    ((bp->b_flags & (B_DELWRI|B_INVAL)) == B_DELWRI)) {
		/*
		 * Only count buffers undergoing write I/O
		 * on the related vnode.
		 */
		if (bp->b_vp == NULL ||
		    bio_track_active(&bp->b_vp->v_track_write) == 0) {
			return (0);
		}
#if defined(SHOW_BUSYBUFS) || defined(DIAGNOSTIC)
		kprintf(
	    "%p dev:?, flags:%08x, loffset:%lld, doffset:%lld\n",
		    bp,
		    bp->b_flags, bp->b_loffset,
		    bp->b_bio2.bio_offset);
#endif
		return(1);
	}
	return(0);
}

/*
 * If the shutdown was a clean halt, behave accordingly.
 */
static void
shutdown_halt(void *junk, int howto)
{
	if (howto & RB_HALT) {
		kprintf("\n");
		kprintf("The operating system has halted.\n");
#ifdef _KERNEL_VIRTUAL
		cpu_halt();
#else
		kprintf("Please press any key to reboot.\n\n");
		switch (cngetc()) {
		case -1:		/* No console, just die */
			cpu_halt();
			/* NOTREACHED */
		default:
			howto &= ~RB_HALT;
			break;
		}
#endif
	}
}

/*
 * Check to see if the system panicked; if so, pause and then reboot
 * according to the specified delay.
 */
static void
shutdown_panic(void *junk, int howto)
{
	int loop;

	if (howto & RB_DUMP) {
		if (PANIC_REBOOT_WAIT_TIME != 0) {
			if (PANIC_REBOOT_WAIT_TIME != -1) {
				kprintf("Automatic reboot in %d seconds - "
				       "press a key on the console to abort\n",
					PANIC_REBOOT_WAIT_TIME);
				for (loop = PANIC_REBOOT_WAIT_TIME * 10;
				     loop > 0; --loop) {
					DELAY(1000 * 100); /* 1/10th second */
					/* Did user type a key? */
					if (cncheckc() != -1)
						break;
				}
				if (!loop)
					return;
			}
		} else { /* zero time specified - reboot NOW */
			return;
		}
		kprintf("--> Press a key on the console to reboot,\n");
		kprintf("--> or switch off the system now.\n");
		cngetc();
	}
}

/*
 * Everything done, now reset
 */
static void
shutdown_reset(void *junk, int howto)
{
	kprintf("Rebooting...\n");
	DELAY(1000000);	/* wait 1 sec for kprintf's to complete and be read */
	/* cpu_boot(howto); */ /* doesn't do anything at the moment */
	cpu_reset();
	/* NOTREACHED */ /* assuming reset worked */
}

/*
 * Try to remove FS references in the specified process.  This function
 * is used during shutdown
 */
static
void
shutdown_cleanup_proc(struct proc *p)
{
	struct filedesc *fdp;
	struct vmspace *vm;

	if (p == NULL)
		return;
	if ((fdp = p->p_fd) != NULL) {
		kern_closefrom(0);
		if (fdp->fd_cdir) {
			cache_drop(&fdp->fd_ncdir);
			vrele(fdp->fd_cdir);
			fdp->fd_cdir = NULL;
		}
		if (fdp->fd_rdir) {
			cache_drop(&fdp->fd_nrdir);
			vrele(fdp->fd_rdir);
			fdp->fd_rdir = NULL;
		}
		if (fdp->fd_jdir) {
			cache_drop(&fdp->fd_njdir);
			vrele(fdp->fd_jdir);
			fdp->fd_jdir = NULL;
		}
	}
	if (p->p_vkernel)
		vkernel_exit(p);
	if (p->p_textvp) {
		vrele(p->p_textvp);
		p->p_textvp = NULL;
	}
	vm = p->p_vmspace;
	if (vm != NULL) {
		pmap_remove_pages(vmspace_pmap(vm),
				  VM_MIN_USER_ADDRESS,
				  VM_MAX_USER_ADDRESS);
		vm_map_remove(&vm->vm_map,
			      VM_MIN_USER_ADDRESS,
			      VM_MAX_USER_ADDRESS);
	}
}

/*
 * Magic number for savecore
 *
 * exported (symorder) and used at least by savecore(8)
 *
 * Mark it as used so that gcc doesn't optimize it away.
 */
__attribute__((__used__))
	static u_long const dumpmag = 0x8fca0101UL;

static int	dumpsize = 0;		/* also for savecore */

static int	dodump = 1;

SYSCTL_INT(_machdep, OID_AUTO, do_dump, CTLFLAG_RW, &dodump, 0,
    "Try to perform coredump on kernel panic");

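/*
 * Validate and record a new dump device.  The device is opened briefly if
 * necessary so that DIOCGPART works, and the dump offset is placed as close
 * to the end of the partition as the physical memory size allows.
 */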
static int
setdumpdev(cdev_t dev)
{
	struct partinfo pinfo;
	u_int64_t newdumplo;
	int error;
	int doopen;

	if (dev == NULL) {
		dumpdev = dev;
		return (0);
	}
	bzero(&pinfo, sizeof(pinfo));

	/*
	 * We have to open the device before we can perform ioctls on it,
	 * or the slice/label data may not be present.  Device opens are
	 * usually tracked by specfs, but the dump device can be set in
	 * early boot and may not be open so this is somewhat of a hack.
	 */
	doopen = (dev->si_sysref.refcnt == 1);
	if (doopen) {
		error = dev_dopen(dev, FREAD, S_IFCHR, proc0.p_ucred);
		if (error)
			return (error);
	}
	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
			   proc0.p_ucred, NULL);
	if (doopen)
		dev_dclose(dev, FREAD, S_IFCHR);
	if (error || pinfo.media_blocks == 0 || pinfo.media_blksize == 0)
		return (ENXIO);

	newdumplo = pinfo.media_blocks -
		    ((u_int64_t)Maxmem * PAGE_SIZE / DEV_BSIZE);
	if ((int64_t)newdumplo < (int64_t)pinfo.reserved_blocks)
		return (ENOSPC);
	dumpdev = dev;
	dumplo64 = newdumplo;
	return (0);
}


/* ARGSUSED */
static void dump_conf (void *dummy);
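/*
 * Pick up the "dumpdev" kernel environment variable (typically set via the
 * boot loader configuration) and try to configure that device as the dump
 * device at boot time.
 */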
static void
dump_conf(void *dummy)
{
	char *path;
	cdev_t dev;

	path = kmalloc(MNAMELEN, M_TEMP, M_WAITOK);
	if (TUNABLE_STR_FETCH("dumpdev", path, MNAMELEN) != 0) {
		dev = kgetdiskbyname(path);
		if (dev != NULL)
			dumpdev = dev;
	}
	kfree(path, M_TEMP);
	if (setdumpdev(dumpdev) != 0)
		dumpdev = NULL;
}

SYSINIT(dump_conf, SI_SUB_DUMP_CONF, SI_ORDER_FIRST, dump_conf, NULL)

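/*
 * sysctl handler for kern.dumpdev: report the current dump device as a
 * udev_t and, on a write, translate the new value back to a cdev_t and
 * run it through setdumpdev().
 */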
static int
sysctl_kern_dumpdev(SYSCTL_HANDLER_ARGS)
{
	int error;
	udev_t ndumpdev;

	ndumpdev = dev2udev(dumpdev);
	error = sysctl_handle_opaque(oidp, &ndumpdev, sizeof ndumpdev, req);
	if (error == 0 && req->newptr != NULL)
		error = setdumpdev(udev2dev(ndumpdev, 0));
	return (error);
}

SYSCTL_PROC(_kern, KERN_DUMPDEV, dumpdev, CTLTYPE_OPAQUE|CTLFLAG_RW,
	0, sizeof dumpdev, sysctl_kern_dumpdev, "T,udev_t", "");

/*
 * dumpsys() is invoked from boot() once filesystem activity has been
 * synced.  It writes an image of physical memory to the configured dump
 * device for later recovery by savecore(8).
 */
static void
dumpsys(void)
{
	int	error;

	savectx(&dumppcb);
	dumpthread = curthread;
	if (dumping++) {
		kprintf("Dump already in progress, bailing...\n");
		return;
	}
	if (!dodump)
		return;
	if (dumpdev == NULL)
		return;
	dumpsize = Maxmem;
	kprintf("\ndumping to dev %s, blockno %lld\n",
		devtoname(dumpdev),
		(long long)dumplo64);
	kprintf("dump ");
	error = dev_ddump(dumpdev);
	if (error == 0) {
		kprintf("succeeded\n");
		return;
	}
	kprintf("failed, reason: ");
	switch (error) {
	case ENOSYS:
	case ENODEV:
		kprintf("device doesn't support a dump routine\n");
		break;

	case ENXIO:
		kprintf("device bad\n");
		break;

	case EFAULT:
		kprintf("device not ready\n");
		break;

	case EINVAL:
		kprintf("area improper\n");
		break;

	case EIO:
		kprintf("i/o error\n");
		break;

	case EINTR:
		kprintf("aborted from console\n");
		break;

	default:
		kprintf("unknown, error = %d\n", error);
		break;
	}
}

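/*
 * Called by device dump routines to report progress: print a megabyte
 * counter, keep any hardware watchdog alive, and return -1 if the operator
 * aborts the dump with ctrl-C.
 */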
int
dumpstatus(vm_offset_t addr, off_t count)
{
	int c;

	if (addr % (1024 * 1024) == 0) {
#ifdef HW_WDOG
		if (wdog_tickler)
			(*wdog_tickler)();
#endif
		kprintf("%ld ", (long)(count / (1024 * 1024)));
	}

	if ((c = cncheckc()) == 0x03)
		return -1;
	else if (c != -1)
		kprintf("[CTRL-C to abort] ");

	return 0;
}

/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	int bootopt, newpanic;
	__va_list ap;
	static char buf[256];

#ifdef SMP
	/*
	 * If a panic occurs on multiple cpus before the first is able to
	 * halt the other cpus, only one cpu is allowed to take the panic.
	 * Attempt to be verbose about this situation but if the kprintf()
	 * itself panics don't let us overrun the kernel stack.
	 *
	 * Be very nasty about descheduling our thread at the lowest
	 * level possible in an attempt to freeze the thread without
	 * inducing further panics.
	 *
	 * Bumping gd_trap_nesting_level will also bypass assertions in
	 * lwkt_switch() and allow us to switch away even if we are a
	 * FAST interrupt or IPI.
	 */
	if (atomic_poll_acquire_int(&panic_cpu_interlock)) {
		panic_cpu_gd = mycpu;
	} else if (panic_cpu_gd != mycpu) {
		crit_enter();
		++mycpu->gd_trap_nesting_level;
		if (mycpu->gd_trap_nesting_level < 25) {
			kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
				mycpu->gd_cpuid, curthread);
		}
		curthread->td_release = NULL;	/* be a grinch */
		for (;;) {
			lwkt_deschedule_self(curthread);
			lwkt_switch();
		}
		/* NOT REACHED */
		/* --mycpu->gd_trap_nesting_level */
		/* crit_exit() */
	}
#endif
	bootopt = RB_AUTOBOOT | RB_DUMP;
	if (sync_on_panic == 0)
		bootopt |= RB_NOSYNC;
	newpanic = 0;
	if (panicstr)
		bootopt |= RB_NOSYNC;
	else {
		panicstr = fmt;
		newpanic = 1;
	}

	__va_start(ap, fmt);
	kvsnprintf(buf, sizeof(buf), fmt, ap);
	if (panicstr == fmt)
		panicstr = buf;
	__va_end(ap);
	kprintf("panic: %s\n", buf);
#ifdef SMP
	/* two separate prints in case of an unmapped page and trap */
	kprintf("mp_lock = %08x; ", mp_lock);
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif

#if defined(DDB)
	if (newpanic && trace_on_panic)
		print_backtrace();
	if (debugger_on_panic)
		Debugger("panic");
#endif
	boot(bootopt);
}

/*
 * Support for poweroff delay.
 */
#ifndef POWEROFF_DELAY
# define POWEROFF_DELAY 5000
#endif
static int poweroff_delay = POWEROFF_DELAY;

SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
	&poweroff_delay, 0, "");

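/*
 * shutdown_final handler registered at SHUTDOWN_PRI_FIRST: pause for
 * poweroff_delay milliseconds before the machine is powered off.
 */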
static void
poweroff_wait(void *junk, int howto)
{
	if (!(howto & RB_POWEROFF) || poweroff_delay <= 0)
		return;
	DELAY(poweroff_delay * 1000);
}

/*
 * Some system processes (e.g. syncer) need to be stopped at appropriate
 * points in their main loops prior to a system shutdown, so that they
 * won't interfere with the shutdown process (e.g. by holding a disk buf
 * to cause sync to fail).  For each of these system processes, register
 * shutdown_kproc() as a handler for one of the shutdown events.
 */
static int kproc_shutdown_wait = 60;
SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
    &kproc_shutdown_wait, 0, "");

void
shutdown_kproc(void *arg, int howto)
{
	struct thread *td;
	struct proc *p;
	int error;

	if (panicstr)
		return;

	td = (struct thread *)arg;
	if ((p = td->td_proc) != NULL) {
	    kprintf("Waiting (max %d seconds) for system process `%s' to stop...",
		kproc_shutdown_wait, p->p_comm);
	} else {
	    kprintf("Waiting (max %d seconds) for system thread %s to stop...",
		kproc_shutdown_wait, td->td_comm);
	}
	error = suspend_kproc(td, kproc_shutdown_wait * hz);

	if (error == EWOULDBLOCK)
		kprintf("timed out\n");
	else
		kprintf("stopped\n");
}
860