xref: /freebsd/sys/i386/i386/minidump_machdep.c (revision aa0a1e58)
/*-
 * Copyright (c) 2006 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/minidump.h>

CTASSERT(sizeof(struct kerneldumpheader) == 512);

/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
 * is to protect us from metadata and to protect metadata from us.
 */
#define	SIZEOF_METADATA		(64*1024)

#define	MD_ALIGN(x)	(((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define	DEV_ALIGN(x)	(((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))

uint32_t *vm_page_dump;
int vm_page_dump_size;

static struct kerneldumpheader kdh;
static off_t dumplo;

/* Handle chunked writes. */
static size_t fragsz;
static void *dump_va;
static uint64_t counter, progress;

CTASSERT(sizeof(*vm_page_dump) == 4);
#ifndef XEN
#define xpmap_mtop(x) (x)
#define xpmap_ptom(x) (x)
#endif

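/*
 * Return non-zero if the physical page at "pa" falls inside one of the
 * dump_avail[] ranges and should therefore be considered for the minidump.
 */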
static int
is_dumpable(vm_paddr_t pa)
{
	int i;

	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (1);
	}
	return (0);
}

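/* Convert a count of 4K pages to megabytes, rounding up (256 pages per MB). */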
#define PG2MB(pgs) (((pgs) + (1 << 8) - 1) >> 8)

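/*
 * Write out any block data accumulated by blk_write() (dump_va, fragsz
 * bytes), advance the device offset and reset the fragment size.
 */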
static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dump_write(di, dump_va, 0, dumplo, fragsz);
	dumplo += fragsz;
	fragsz = 0;
	return (error);
}

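/*
 * Write a run of pages to the dump device.  The caller supplies either a
 * kernel virtual address ("ptr") or a physical address ("pa"), never both.
 * Physical pages are mapped through the temporary crash dump map and
 * accumulated until a full-sized block can be flushed.
 */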
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, i, c;
	u_int maxdumpsz;

	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		printf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("can't have both va and pa!\n");
		return (EINVAL);
	}
	if (pa != 0 && (pa % PAGE_SIZE) != 0) {
		printf("address not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL) {
		/* If we're doing a virtual dump, flush any pre-existing pa pages */
		error = blk_flush(di);
		if (error)
			return (error);
	}
	while (sz) {
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
		if (counter >> 24) {
			printf(" %llu", (unsigned long long)PG2MB(progress >> PAGE_SHIFT));
			counter &= (1<<24) - 1;
		}
		if (ptr) {
			error = dump_write(di, ptr, 0, dumplo, len);
			if (error)
				return (error);
			dumplo += len;
			ptr += len;
			sz -= len;
		} else {
			for (i = 0; i < len; i += PAGE_SIZE)
				dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT);
			fragsz += len;
			pa += len;
			sz -= len;
			if (fragsz == maxdumpsz) {
				error = blk_flush(di);
				if (error)
					return (error);
			}
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

/* A fake page table page, to avoid having to handle both 4K and 2M pages */
static pt_entry_t fakept[NPTEPG];

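/*
 * Write a minidump: walk the kernel page tables to decide which physical
 * pages to save, then emit the kernel dump header, the minidump header,
 * the message buffer, the page bitmap, the kernel page table pages and
 * finally the selected physical pages themselves.
 */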
void
minidumpsys(struct dumperinfo *di)
{
	uint64_t dumpsize;
	uint32_t ptesize;
	vm_offset_t va;
	int error;
	uint32_t bits;
	uint64_t pa;
	pd_entry_t *pd;
	pt_entry_t *pt;
	int i, j, k, bit;
	struct minidumphdr mdhdr;

	counter = 0;
	/* Walk page table pages, set bits in vm_page_dump */
	ptesize = 0;
	for (va = KERNBASE; va < kernel_vm_end; va += NBPDR) {
		/*
		 * We always write a page, even if it is zero. Each
		 * page written corresponds to 2MB of space.
		 */
		ptesize += PAGE_SIZE;
		pd = (pd_entry_t *)((uintptr_t)IdlePTD + KERNBASE);	/* always mapped! */
		j = va >> PDRSHIFT;
		if ((pd[j] & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
			/* This is an entire 2M page. */
			pa = xpmap_mtop(pd[j] & PG_PS_FRAME);
			for (k = 0; k < NPTEPG; k++) {
				if (is_dumpable(pa))
					dump_add_page(pa);
				pa += PAGE_SIZE;
			}
			continue;
		}
		if ((pd[j] & PG_V) == PG_V) {
			/* set bit for each valid page in this 2MB block */
			pt = pmap_kenter_temporary(xpmap_mtop(pd[j] & PG_FRAME), 0);
			for (k = 0; k < NPTEPG; k++) {
				if ((pt[k] & PG_V) == PG_V) {
					pa = xpmap_mtop(pt[k] & PG_FRAME);
					if (is_dumpable(pa))
						dump_add_page(pa);
				}
			}
		} else {
			/* nothing, we're going to dump a null page */
		}
	}

	/* Calculate dump size. */
	dumpsize = ptesize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(vm_page_dump_size);
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfl(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			/* Clear out undumpable pages now if needed */
			if (is_dumpable(pa)) {
				dumpsize += PAGE_SIZE;
			} else {
				dump_drop_page(pa);
			}
			bits &= ~(1ul << bit);
		}
	}
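	/* Account for the page that holds the minidump header itself. */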
	dumpsize += PAGE_SIZE;

	/* Determine dump offset on device. */
	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
		error = ENOSPC;
		goto fail;
	}
	dumplo = di->mediaoffset + di->mediasize - dumpsize;
	dumplo -= sizeof(kdh) * 2;
	progress = dumpsize;

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = vm_page_dump_size;
	mdhdr.ptesize = ptesize;
	mdhdr.kernbase = KERNBASE;
#ifdef PAE
	mdhdr.paemode = 1;
#endif

	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_I386_VERSION, dumpsize, di->blocksize);

	printf("Physical memory: %ju MB\n", ptoa((uintmax_t)physmem) / 1048576);
	printf("Dumping %llu MB:", (unsigned long long)dumpsize >> 20);

	/* Dump leader */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Dump my header */
	bzero(&fakept, sizeof(fakept));
	bcopy(&mdhdr, &fakept, sizeof(mdhdr));
	error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0, round_page(vm_page_dump_size));
	if (error)
		goto fail;

	/* Dump kernel page table pages */
	for (va = KERNBASE; va < kernel_vm_end; va += NBPDR) {
		/* We always write a page, even if it is zero */
		pd = (pd_entry_t *)((uintptr_t)IdlePTD + KERNBASE);	/* always mapped! */
		j = va >> PDRSHIFT;
		if ((pd[j] & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
			/* This is a single 2M block. Generate a fake PTP */
			pa = pd[j] & PG_PS_FRAME;
			for (k = 0; k < NPTEPG; k++) {
				fakept[k] = (pa + (k * PAGE_SIZE)) | PG_V | PG_RW | PG_A | PG_M;
			}
			error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakept in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}
		if ((pd[j] & PG_V) == PG_V) {
			pa = xpmap_mtop(pd[j] & PG_FRAME);
#ifndef XEN
			error = blk_write(di, 0, pa, PAGE_SIZE);
#else
			pt = pmap_kenter_temporary(pa, 0);
			memcpy(fakept, pt, PAGE_SIZE);
			for (i = 0; i < NPTEPG; i++)
				fakept[i] = xpmap_mtop(fakept[i]);
			error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakept in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(fakept, sizeof(fakept));
#endif

			if (error)
				goto fail;
		} else {
			bzero(fakept, sizeof(fakept));
			error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakept in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
		}
	}

	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
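	/*
	 * Each set bit in vm_page_dump[] selects one physical page: bit
	 * "bit" of 32-bit word "i" corresponds to the page starting at
	 * physical address (i * 32 + bit) * PAGE_SIZE.
	 */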
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfl(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			error = blk_write(di, 0, pa, PAGE_SIZE);
			if (error)
				goto fail;
			bits &= ~(1ul << bit);
		}
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	/* Dump trailer */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Signal completion, signoff and exit stage left. */
	dump_write(di, NULL, 0, 0, 0);
	printf("\nDump complete\n");
	return;

 fail:
	if (error < 0)
		error = -error;

	if (error == ECANCELED)
		printf("\nDump aborted\n");
	else if (error == ENOSPC)
		printf("\nDump failed. Partition too small.\n");
	else
		printf("\n** DUMP FAILED (ERROR %d) **\n", error);
}

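/*
 * Mark a physical page for inclusion in the minidump by setting its bit
 * in the vm_page_dump bitmap.
 */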
void
dump_add_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 5;		/* 2^5 = 32 */
	bit = pa & 31;
	atomic_set_int(&vm_page_dump[idx], 1ul << bit);
}

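/*
 * Remove a physical page from the minidump by clearing its bit in the
 * vm_page_dump bitmap.
 */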
void
dump_drop_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 5;		/* 2^5 = 32 */
	bit = pa & 31;
	atomic_clear_int(&vm_page_dump[idx], 1ul << bit);
}