xref: /freebsd/sys/amd64/amd64/minidump_machdep.c (revision c697fb7f)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_pmap.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/minidump.h>

CTASSERT(sizeof(struct kerneldumpheader) == 512);

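/*
 * Bitmap of physical pages to include in the minidump: one bit per 4KB
 * page frame, set via dump_add_page().  vm_page_dump_size is the size of
 * the bitmap in bytes.
 */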
uint64_t *vm_page_dump;
int vm_page_dump_size;

static struct kerneldumpheader kdh;

/* Handle chunked writes. */
static size_t fragsz;
static void *dump_va;
static size_t counter, progress, dumpsize, wdog_next;

CTASSERT(sizeof(*vm_page_dump) == 8);
static int dump_retry_count = 5;
SYSCTL_INT(_machdep, OID_AUTO, dump_retry_count, CTLFLAG_RWTUN,
    &dump_retry_count, 0, "Number of times dump has to retry before bailing out");

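/*
 * A physical page is dumpable if its vm_page is not marked PG_NODUMP or,
 * when it has no vm_page at all, if it falls within one of the
 * dump_avail[] ranges.
 */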
static int
is_dumpable(vm_paddr_t pa)
{
	vm_page_t m;
	int i;

	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
		return ((m->flags & PG_NODUMP) == 0);
	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (1);
	}
	return (0);
}

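/* Convert a page count to megabytes, rounding up (256 4KB pages per MB). */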
#define PG2MB(pgs) (((pgs) + (1 << 8) - 1) >> 8)

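/*
 * Write out any partially accumulated block of temporarily mapped pages
 * and reset the fragment accounting.
 */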
static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dump_append(di, dump_va, 0, fragsz);
	fragsz = 0;
	return (error);
}

static struct {
	int min_per;
	int max_per;
	int visited;
} progress_track[10] = {
	{  0,  10, 0},
	{ 10,  20, 0},
	{ 20,  30, 0},
	{ 30,  40, 0},
	{ 40,  50, 0},
	{ 50,  60, 0},
	{ 60,  70, 0},
	{ 70,  80, 0},
	{ 80,  90, 0},
	{ 90, 100, 0}
};

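/*
 * progress counts bytes remaining, so the fraction completed is 100 minus
 * the remaining percentage.  Each 10% bucket is reported at most once.
 */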
static void
report_progress(size_t progress, size_t dumpsize)
{
	int sofar, i;

	sofar = 100 - ((progress * 100) / dumpsize);
	for (i = 0; i < nitems(progress_track); i++) {
		if (sofar < progress_track[i].min_per ||
		    sofar > progress_track[i].max_per)
			continue;
		if (progress_track[i].visited)
			return;
		progress_track[i].visited = 1;
		printf("..%d%%", sofar);
		return;
	}
}

/* Pat the watchdog approximately every 128MB of the dump. */
#define	WDOG_DUMP_INTERVAL	(128 * 1024 * 1024)

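/*
 * Write a run of pages to the dump device.  Exactly one of ptr (kernel
 * virtual address) or pa (physical address) may be supplied.  Virtual
 * ranges are appended directly; physical ranges are mapped a page at a
 * time with pmap_kenter_temporary() and accumulated until a full block of
 * maxdumpsz bytes can be flushed.  Progress reporting, watchdog patting
 * and the CTRL-C abort check also live here.
 */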
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, i, c;
	u_int maxdumpsz;

	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		printf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("cant have both va and pa!\n");
		return (EINVAL);
	}
	if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
		printf("address not page aligned %p\n", ptr);
		return (EINVAL);
	}
	if (ptr != NULL) {
		/* If we're doing a virtual dump, flush any pre-existing pa pages */
		error = blk_flush(di);
		if (error)
			return (error);
	}
	while (sz) {
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
		if (counter >> 24) {
			report_progress(progress, dumpsize);
			counter &= (1<<24) - 1;
		}
		if (progress <= wdog_next) {
			wdog_kern_pat(WD_LASTVAL);
			if (wdog_next > WDOG_DUMP_INTERVAL)
				wdog_next -= WDOG_DUMP_INTERVAL;
			else
				wdog_next = 0;
		}

		if (ptr) {
			error = dump_append(di, ptr, 0, len);
			if (error)
				return (error);
			ptr += len;
			sz -= len;
		} else {
			for (i = 0; i < len; i += PAGE_SIZE)
				dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT);
			fragsz += len;
			pa += len;
			sz -= len;
			if (fragsz == maxdumpsz) {
				error = blk_flush(di);
				if (error)
					return (error);
			}
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

/* A fake page table page, to avoid having to handle both 4K and 2M pages */
static pd_entry_t fakepd[NPDEPG];

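/*
 * Written between dump_start() and dump_finish(), a minidump consists of:
 *	- one page holding the minidump header (mdhdr)
 *	- the kernel message buffer
 *	- the vm_page_dump bitmap
 *	- the kernel page table pages, one PD page per 1GB of KVA
 *	- the data pages whose bits are set in the bitmap, in ascending
 *	  physical address order
 * The first pass below walks the kernel page tables and marks every page
 * that should be dumped; the total size is then computed from the bitmap
 * before anything is written.
 */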
int
minidumpsys(struct dumperinfo *di)
{
	uint32_t pmapsize;
	vm_offset_t va;
	int error;
	uint64_t bits;
	uint64_t *pml4, *pdp, *pd, *pt, pa;
	int i, ii, j, k, n, bit;
	int retry_count;
	struct minidumphdr mdhdr;

	retry_count = 0;
 retry:
	retry_count++;
	counter = 0;
	for (i = 0; i < nitems(progress_track); i++)
		progress_track[i].visited = 0;
	/* Walk page table pages, set bits in vm_page_dump */
	pmapsize = 0;
	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR,
	    kernel_vm_end); ) {
		/*
		 * We always write a page, even if it is zero. Each
		 * page written corresponds to 1GB of space
		 */
		pmapsize += PAGE_SIZE;
		ii = pmap_pml4e_index(va);
		pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii;
		pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
		i = pmap_pdpe_index(va);
		if ((pdp[i] & PG_V) == 0) {
			va += NBPDP;
			continue;
		}

		/*
		 * 1GB page is represented as 512 2MB pages in a dump.
		 */
		if ((pdp[i] & PG_PS) != 0) {
			va += NBPDP;
			pa = pdp[i] & PG_PS_FRAME;
			for (n = 0; n < NPDEPG * NPTEPG; n++) {
				if (is_dumpable(pa))
					dump_add_page(pa);
				pa += PAGE_SIZE;
			}
			continue;
		}

		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		for (n = 0; n < NPDEPG; n++, va += NBPDR) {
			j = pmap_pde_index(va);

			if ((pd[j] & PG_V) == 0)
				continue;

			if ((pd[j] & PG_PS) != 0) {
				/* This is an entire 2M page. */
				pa = pd[j] & PG_PS_FRAME;
				for (k = 0; k < NPTEPG; k++) {
					if (is_dumpable(pa))
						dump_add_page(pa);
					pa += PAGE_SIZE;
				}
				continue;
			}

			pa = pd[j] & PG_FRAME;
			/* set bit for this PTE page */
			if (is_dumpable(pa))
				dump_add_page(pa);
			/* and for each valid page in this 2MB block */
			pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
			for (k = 0; k < NPTEPG; k++) {
				if ((pt[k] & PG_V) == 0)
					continue;
				pa = pt[k] & PG_FRAME;
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		}
	}

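	/*
	 * Each set bit in vm_page_dump identifies one page: word i covers
	 * page frame numbers i * 64 .. i * 64 + 63, so the physical address
	 * is (i * 64 + bit) * PAGE_SIZE.  Pages that turn out not to be
	 * dumpable are dropped from the bitmap here so that the write pass
	 * below matches the size computed now; one extra page is counted
	 * for the minidump header.
	 */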
	/* Calculate dump size. */
	dumpsize = pmapsize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(vm_page_dump_size);
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			/* Clear out undumpable pages now if needed */
			if (is_dumpable(pa)) {
				dumpsize += PAGE_SIZE;
			} else {
				dump_drop_page(pa);
			}
			bits &= ~(1ul << bit);
		}
	}
	dumpsize += PAGE_SIZE;

	wdog_next = progress = dumpsize;

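	/*
	 * The minidump header records the msgbuf, bitmap and pmap sizes so
	 * that the regions that follow can be located when the dump is read
	 * back.
	 */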
	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = vm_page_dump_size;
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;

	dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION,
	    dumpsize);

	error = dump_start(di, &kdh);
	if (error != 0)
		goto fail;

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump my header */
	bzero(&fakepd, sizeof(fakepd));
	bcopy(&mdhdr, &fakepd, sizeof(mdhdr));
	error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0, round_page(vm_page_dump_size));
	if (error)
		goto fail;

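	/*
	 * One PAGE_SIZE page directory page is written for every 1GB (NBPDP)
	 * slice of the kernel address space.  Unmapped slices are emitted as
	 * a zeroed page, and 1GB mappings are expanded into 512 synthetic
	 * 2MB entries in fakepd, so a reader only ever sees 4K and 2M pages.
	 */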
	/* Dump kernel page directory pages */
	bzero(fakepd, sizeof(fakepd));
	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR,
	    kernel_vm_end); va += NBPDP) {
		ii = pmap_pml4e_index(va);
		pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii;
		pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
		i = pmap_pdpe_index(va);

		/* We always write a page, even if it is zero */
		if ((pdp[i] & PG_V) == 0) {
			error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakepd in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}

		/* 1GB page is represented as 512 2MB pages in a dump */
		if ((pdp[i] & PG_PS) != 0) {
			/* PDPE and PDP have identical layout in this case */
			fakepd[0] = pdp[i];
			for (j = 1; j < NPDEPG; j++)
				fakepd[j] = fakepd[j - 1] + NBPDR;
			error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakepd in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(fakepd, sizeof(fakepd));
			continue;
		}

		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		error = blk_write(di, (char *)pd, 0, PAGE_SIZE);
		if (error)
			goto fail;
		error = blk_flush(di);
		if (error)
			goto fail;
	}

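	/*
	 * Data pages are written in ascending physical address order,
	 * following the same bitmap scan used to size the dump above.
	 */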
	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			error = blk_write(di, 0, pa, PAGE_SIZE);
			if (error)
				goto fail;
			bits &= ~(1ul << bit);
		}
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	error = dump_finish(di, &kdh);
	if (error != 0)
		goto fail;

	printf("\nDump complete\n");
	return (0);

 fail:
	if (error < 0)
		error = -error;

	printf("\n");
	if (error == ENOSPC) {
		printf("Dump map grown while dumping. ");
		if (retry_count < dump_retry_count) {
			printf("Retrying...\n");
			goto retry;
		}
		printf("Dump failed.\n");
	}
	else if (error == ECANCELED)
		printf("Dump aborted\n");
	else if (error == E2BIG) {
		printf("Dump failed. Partition too small (about %lluMB were "
		    "needed this time).\n", (long long)dumpsize >> 20);
	} else
		printf("** DUMP FAILED (ERROR %d) **\n", error);
	return (error);
}

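/*
 * The dump bitmap is indexed by page frame number: bit (pfn % 64) of word
 * (pfn / 64).  The atomic ops keep concurrent updates to the same word safe.
 */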
void
dump_add_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_set_long(&vm_page_dump[idx], 1ul << bit);
}

void
dump_drop_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
}