/*-
 * Copyright (c) 2006 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/minidump_machdep.c,v 1.10 2009/05/29 21:27:12 jamie Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/device.h>
#include <sys/globaldata.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/elf.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/minidump.h>

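/* The dump header must occupy exactly one 512-byte disk sector. */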
CTASSERT(sizeof(struct kerneldumpheader) == 512);

/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device.  This
 * is to protect us from metadata and to protect metadata from us.
 */
#define	SIZEOF_METADATA		(64*1024)

#define	MD_ALIGN(x)	(((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define	DEV_ALIGN(x)	(((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))

extern uint64_t KPDPphys;

uint64_t *vm_page_dump;
int vm_page_dump_size;

static struct kerneldumpheader kdh;
static off_t dumplo;

/* Handle chunked writes. */
static size_t fragsz;
static void *dump_va;
static size_t counter, progress;

CTASSERT(sizeof(*vm_page_dump) == 8);

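/*
 * Return non-zero if the physical address lies within one of the
 * dump_avail[] ranges, i.e. it is RAM that belongs in the minidump.
 */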
static int
is_dumpable(vm_paddr_t pa)
{
	int i;

	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (1);
	}
	return (0);
}

/* Convert a page count to whole megabytes, rounding up (256 4K pages/MB). */
#define PG2MB(pgs) (((pgs) + (1 << 8) - 1) >> 8)

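/*
 * Flush any pages staged in the crashdump map out to the dump device
 * and reset the fragment accumulator.
 */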
static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dev_ddump(di->priv, dump_va, 0, dumplo, fragsz);
	dumplo += fragsz;
	fragsz = 0;
	return (error);
}

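/*
 * Write a run of pages to the dump device.  Exactly one of 'ptr' (a
 * kernel virtual address) or 'pa' (a physical address) may be supplied;
 * physical pages are staged through the crashdump map and flushed in
 * chunks of at most max_iosize bytes.
 */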
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, i, c;
	int max_iosize;

	error = 0;
	if ((sz & PAGE_MASK)) {
		kprintf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		kprintf("can't have both va and pa!\n");
		return (EINVAL);
	}
	if (pa != 0 && (((uintptr_t)pa) & PAGE_MASK) != 0) {
		kprintf("address not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL) {
		/*
		 * If we're doing a virtual dump, flush any
		 * pre-existing pa pages.
		 */
		error = blk_flush(di);
		if (error)
			return (error);
	}
	max_iosize = min(MAXPHYS, di->maxiosize);
	while (sz) {
		len = max_iosize - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
		/* Print a progress update roughly every 16MB written. */
		if (counter >> 24) {
			kprintf(" %ld", PG2MB(progress >> PAGE_SHIFT));
			counter &= (1<<24) - 1;
		}
		if (ptr) {
			error = dev_ddump(di->priv, ptr, 0, dumplo, len);
			if (error)
				return (error);
			dumplo += len;
			ptr += len;
			sz -= len;
		} else {
			/*
			 * Map the pages after any already-staged fragment.
			 * pmap_kenter_temporary() returns the base of the
			 * crashdump map, so dump_va always points at the
			 * start of the accumulated fragment.
			 */
			for (i = 0; i < len; i += PAGE_SIZE) {
				dump_va = pmap_kenter_temporary(pa + i,
						(i + fragsz) >> PAGE_SHIFT);
			}
			smp_invltlb();
			fragsz += len;
			pa += len;
			sz -= len;
			if (fragsz == max_iosize) {
				error = blk_flush(di);
				if (error)
					return (error);
			}
		}
	}

	/* Check for user abort. */
	c = cncheckc();
	if (c == 0x03)
		return (ECANCELED);
	if (c != -1)
		kprintf(" (CTRL-C to abort) ");

	return (0);
}

/* A fake page table page, to avoid having to handle both 4K and 2M pages. */
static pt_entry_t fakept[NPTEPG];

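/*
 * Write a minidump: the minidump header, the kernel message buffer, the
 * page bitmap, the kernel page table pages, and finally every RAM page
 * marked in the bitmap, bracketed by a kernel dump header and trailer.
 */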
void
minidumpsys(struct dumperinfo *di)
{
	uint64_t dumpsize;
	uint32_t ptesize;
	vm_offset_t va;
	vm_offset_t kern_end;
	int error;
	uint64_t bits;
	uint64_t *pdp, *pd, *pt, pa;
	int i, j, k, bit;
	struct minidumphdr mdhdr;
	struct mdglobaldata *md;

	counter = 0;
	/*
	 * Walk page table pages, set bits in vm_page_dump.
	 *
	 * NOTE: kernel_vm_end can actually be below KERNBASE.
	 *	 Just use KvaEnd.  Also note that loops which go
	 *	 all the way to the end of the address space might
	 *	 overflow the loop variable.
	 */
	ptesize = 0;

	md = (struct mdglobaldata *)globaldata_find(0);

	kern_end = KvaEnd;
	if (kern_end < (vm_offset_t)&(md[ncpus]))
		kern_end = (vm_offset_t)&(md[ncpus]);
	pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
	for (va = VM_MIN_KERNEL_ADDRESS; va < kern_end; va += NBPDR) {
		/*
		 * Adding NBPDR may wrap va around the top of the address
		 * space; bail out if it wraps back below the start.
		 */
		if (va < VM_MIN_KERNEL_ADDRESS)
			break;

		/*
		 * We always write a page, even if it is zero.  Each
		 * page written corresponds to 2MB of space.
		 */
		i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
		ptesize += PAGE_SIZE;
		if ((pdp[i] & kernel_pmap.pmap_bits[PG_V_IDX]) == 0)
			continue;
		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		j = ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
		if ((pd[j] & (kernel_pmap.pmap_bits[PG_PS_IDX] |
			      kernel_pmap.pmap_bits[PG_V_IDX])) ==
		    (kernel_pmap.pmap_bits[PG_PS_IDX] |
		     kernel_pmap.pmap_bits[PG_V_IDX])) {
			/* This is an entire 2M page. */
			pa = pd[j] & PG_PS_FRAME;
			for (k = 0; k < NPTEPG; k++) {
				if (is_dumpable(pa))
					dump_add_page(pa);
				pa += PAGE_SIZE;
			}
			continue;
		}
		if ((pd[j] & kernel_pmap.pmap_bits[PG_V_IDX]) ==
		    kernel_pmap.pmap_bits[PG_V_IDX]) {
			/* Set a bit for each valid page in this 2MB block. */
			pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
			for (k = 0; k < NPTEPG; k++) {
				if ((pt[k] & kernel_pmap.pmap_bits[PG_V_IDX]) ==
				    kernel_pmap.pmap_bits[PG_V_IDX]) {
					pa = pt[k] & PG_FRAME;
					if (is_dumpable(pa))
						dump_add_page(pa);
				}
			}
		} else {
			/* Nothing: we're going to dump a null page. */
		}
	}

	/* Calculate dump size. */
	dumpsize = ptesize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(vm_page_dump_size);
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			/* Each 64-bit bitmap word covers 64 4K pages. */
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
			      bit) * PAGE_SIZE;
			/* Clear out undumpable pages now if needed. */
			if (is_dumpable(pa)) {
				dumpsize += PAGE_SIZE;
			} else {
				dump_drop_page(pa);
			}
			bits &= ~(1ul << bit);
		}
	}
	/* One more page for the minidump header itself. */
	dumpsize += PAGE_SIZE;

	/* Determine dump offset on device. */
	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
		error = ENOSPC;
		goto fail;
	}
	dumplo = di->mediaoffset + di->mediasize - dumpsize;
	dumplo -= sizeof(kdh) * 2;
	progress = dumpsize;

	/* Initialize mdhdr. */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = vm_page_dump_size;
	mdhdr.ptesize = ptesize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;

	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION,
	    dumpsize, di->blocksize);

	kprintf("Physical memory: %jd MB\n", (intmax_t)ptoa(physmem) / 1048576);
	kprintf("Dumping %jd MB:", (intmax_t)dumpsize >> 20);

	/* Dump leader */
	error = dev_ddump(di->priv, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Dump my header */
	bzero(&fakept, sizeof(fakept));
	bcopy(&mdhdr, &fakept, sizeof(mdhdr));
	error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
	    round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0,
	    round_page(vm_page_dump_size));
	if (error)
		goto fail;

	/* Dump kernel page table pages */
	pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
	for (va = VM_MIN_KERNEL_ADDRESS; va < kern_end; va += NBPDR) {
		/*
		 * Adding NBPDR may wrap va around the top of the address
		 * space; bail out if it wraps back below the start.
		 */
		if (va < VM_MIN_KERNEL_ADDRESS)
			break;

		/*
		 * We always write a page, even if it is zero.
		 */
		i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
		if ((pdp[i] & kernel_pmap.pmap_bits[PG_V_IDX]) == 0) {
			bzero(fakept, sizeof(fakept));
			error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* Flush, in case we reuse fakept in the same block. */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}
		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		j = ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
		if ((pd[j] & (kernel_pmap.pmap_bits[PG_PS_IDX] |
			      kernel_pmap.pmap_bits[PG_V_IDX])) ==
		    (kernel_pmap.pmap_bits[PG_PS_IDX] |
		     kernel_pmap.pmap_bits[PG_V_IDX])) {
			/* This is a single 2M block.  Generate a fake PTP. */
			pa = pd[j] & PG_PS_FRAME;
			for (k = 0; k < NPTEPG; k++) {
				fakept[k] = (pa + (k * PAGE_SIZE)) |
				    kernel_pmap.pmap_bits[PG_V_IDX] |
				    kernel_pmap.pmap_bits[PG_RW_IDX] |
				    kernel_pmap.pmap_bits[PG_A_IDX] |
				    kernel_pmap.pmap_bits[PG_M_IDX];
			}
			error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* Flush, in case we reuse fakept in the same block. */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}
		if ((pd[j] & kernel_pmap.pmap_bits[PG_V_IDX]) ==
		    kernel_pmap.pmap_bits[PG_V_IDX]) {
			pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
			error = blk_write(di, (char *)pt, 0, PAGE_SIZE);
			if (error)
				goto fail;
		} else {
			bzero(fakept, sizeof(fakept));
			error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* Flush, in case we reuse fakept in the same block. */
			error = blk_flush(di);
			if (error)
				goto fail;
		}
	}

	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
			      bit) * PAGE_SIZE;
			error = blk_write(di, 0, pa, PAGE_SIZE);
			if (error)
				goto fail;
			bits &= ~(1ul << bit);
		}
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	/* Dump trailer */
	error = dev_ddump(di->priv, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Signal completion, signoff and exit stage left. */
	dev_ddump(di->priv, NULL, 0, 0, 0);
	kprintf("\nDump complete\n");
	return;

 fail:
	if (error < 0)
		error = -error;

	if (error == ECANCELED)
		kprintf("\nDump aborted\n");
	else if (error == ENOSPC)
		kprintf("\nDump failed. Partition too small.\n");
	else
		kprintf("\n** DUMP FAILED (ERROR %d) **\n", error);
}

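/*
 * Mark a physical page for inclusion in the minidump.  The bitmap keeps
 * one bit per 4K page, 64 pages per 64-bit word.
 */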
void
dump_add_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_set_long(&vm_page_dump[idx], 1ul << bit);
}

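/*
 * Remove a physical page from the minidump bitmap.
 */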
void
dump_drop_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
}