xref: /freebsd/sys/riscv/riscv/minidump_machdep.c (revision 7cc42f6d)
/*-
 * Copyright (c) 2006 Peter Wemm
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 * Copyright (c) 2019 Mitchell Horne
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <sys/watchdog.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/minidump.h>

CTASSERT(sizeof(struct kerneldumpheader) == 512);

static struct kerneldumpheader kdh;

/* Handle chunked writes. */
static size_t fragsz;
static void *dump_va;
static size_t counter, progress, dumpsize;

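/*
 * Page-sized scratch buffer used to stage the minidump header, the
 * dump_avail array, and synthesized page table pages before writing.
 */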
static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)];

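/* Report each 10% slice of progress at most once. */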
static struct {
	int min_per;
	int max_per;
	int visited;
} progress_track[10] = {
	{  0,  10, 0},
	{ 10,  20, 0},
	{ 20,  30, 0},
	{ 30,  40, 0},
	{ 40,  50, 0},
	{ 50,  60, 0},
	{ 60,  70, 0},
	{ 70,  80, 0},
	{ 80,  90, 0},
	{ 90, 100, 0}
};

static void
report_progress(size_t progress, size_t dumpsize)
{
	int sofar, i;

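	/* progress counts down from dumpsize, so convert it to percent done. */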
	sofar = 100 - ((progress * 100) / dumpsize);
	for (i = 0; i < nitems(progress_track); i++) {
		if (sofar < progress_track[i].min_per ||
		    sofar > progress_track[i].max_per)
			continue;
		if (progress_track[i].visited)
			return;
		progress_track[i].visited = 1;
		printf("..%d%%", sofar);
		return;
	}
}

static bool
is_dumpable(vm_paddr_t pa)
{
	vm_page_t m;
	int i;

	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
		return ((m->flags & PG_NODUMP) == 0);

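	/*
	 * Pages with no vm_page are dumpable only if they fall within one
	 * of the dump_avail ranges.
	 */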
	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (true);
	}
	return (false);
}

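/* Write out any physical pages buffered by blk_write(). */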
static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dump_append(di, dump_va, 0, fragsz);
	fragsz = 0;
	return (error);
}

/*
 * Write a block of data to the dump file.
 *
 * Caller can provide data through a pointer or by specifying its
 * physical address.
 *
 * XXX writes using pa should be no larger than PAGE_SIZE.
 */
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, c;
	u_int maxdumpsz;

	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		printf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("can't have both va and pa!\n");
		return (EINVAL);
	}
	if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
		printf("address not page aligned %#lx\n", (uintptr_t)pa);
		return (EINVAL);
	}
	if (ptr != NULL) {
		/*
		 * If we're doing a virtual dump, flush any
		 * pre-existing pa pages.
		 */
		error = blk_flush(di);
		if (error != 0)
			return (error);
	}
	while (sz) {
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
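		/* Report progress roughly every 4MB written. */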
		if (counter >> 22) {
			report_progress(progress, dumpsize);
			counter &= (1 << 22) - 1;
		}

		wdog_kern_pat(WD_LASTVAL);

		if (ptr) {
			error = dump_append(di, ptr, 0, len);
			if (error != 0)
				return (error);
			ptr += len;
			sz -= len;
		} else {
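			/* Physical pages are accessed via the direct map. */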
			dump_va = (void *)PHYS_TO_DMAP(pa);
			fragsz += len;
			pa += len;
			sz -= len;
			error = blk_flush(di);
			if (error != 0)
				return (error);
		}

		/* Check for user abort */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

int
minidumpsys(struct dumperinfo *di)
{
	pd_entry_t *l1, *l2;
	pt_entry_t *l3;
	struct minidumphdr mdhdr;
	uint32_t pmapsize;
	vm_offset_t va;
	vm_paddr_t pa;
	int error;
	int i;
	int retry_count;

	retry_count = 0;
retry:
	retry_count++;
	error = 0;
	pmapsize = 0;

	/* Build set of dumpable pages from kernel pmap */
	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
		pmapsize += PAGE_SIZE;
		if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3))
			continue;

		/* We should always be using the l2 table for kvm */
		if (l2 == NULL)
			continue;

		/* l2 may be a superpage */
		if ((*l2 & PTE_RWX) != 0) {
			pa = (*l2 >> PTE_PPN1_S) << L2_SHIFT;
			for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		} else {
			for (i = 0; i < Ln_ENTRIES; i++) {
				if ((l3[i] & PTE_V) == 0)
					continue;
				pa = (l3[i] >> PTE_PPN0_S) * PAGE_SIZE;
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		}
	}

	/* Calculate dump size */
	dumpsize = pmapsize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(sizeof(dump_avail));
	dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
	VM_PAGE_DUMP_FOREACH(pa) {
		/* Clear out undumpable pages now if needed */
		if (is_dumpable(pa))
			dumpsize += PAGE_SIZE;
		else
			dump_drop_page(pa);
	}
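	/* Account for the page holding the minidump header itself. */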
	dumpsize += PAGE_SIZE;

	progress = dumpsize;

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = KERNBASE;
	mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;
	mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

	dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_RISCV_VERSION,
	    dumpsize);

	error = dump_start(di, &kdh);
	if (error != 0)
		goto fail;

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump minidump header */
	bzero(&tmpbuffer, sizeof(tmpbuffer));
	bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
	    round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump dump_avail */
	_Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer),
	    "Large dump_avail not handled");
	bzero(tmpbuffer, sizeof(tmpbuffer));
	memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0,
	    round_page(BITSET_SIZE(vm_page_dump_pages)));
	if (error)
		goto fail;

	/* Dump kernel page directory pages */
	bzero(&tmpbuffer, sizeof(tmpbuffer));
	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
		if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) {
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* Flush, in case we reuse tmpbuffer in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
		} else if ((*l2 & PTE_RWX) != 0) {
			/* Generate fake l3 entries based on the l2 superpage */
			for (i = 0; i < Ln_ENTRIES; i++) {
				tmpbuffer[i] = (*l2 | (i << PTE_PPN0_S));
			}
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* Flush, in case we reuse tmpbuffer in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(&tmpbuffer, sizeof(tmpbuffer));
		} else {
			pa = (*l2 >> PTE_PPN0_S) * PAGE_SIZE;

			/* We always write a page, even if it is zero */
			error = blk_write(di, NULL, pa, PAGE_SIZE);
			if (error)
				goto fail;
		}
	}

	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
	VM_PAGE_DUMP_FOREACH(pa) {
		error = blk_write(di, 0, pa, PAGE_SIZE);
		if (error)
			goto fail;
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	error = dump_finish(di, &kdh);
	if (error != 0)
		goto fail;

	printf("\nDump complete\n");
	return (0);

fail:
	if (error < 0)
		error = -error;

	printf("\n");
	if (error == ENOSPC) {
		printf("Dump map grown while dumping. ");
		if (retry_count < 5) {
			printf("Retrying...\n");
			goto retry;
		}
		printf("Dump failed.\n");
	} else if (error == ECANCELED)
		printf("Dump aborted\n");
	else if (error == E2BIG) {
		printf("Dump failed. Partition too small (about %lluMB were "
		    "needed this time).\n", (long long)dumpsize >> 20);
	} else
		printf("** DUMP FAILED (ERROR %d) **\n", error);
	return (error);
}