/*	$OpenBSD: kvm.c,v 1.70 2021/12/01 16:51:57 deraadt Exp $ */
/*	$NetBSD: kvm.c,v 1.43 1996/05/05 04:31:59 gwr Exp $	*/

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>	/* MAXCOMLEN MID_MACHINE */
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>

#include <sys/core.h>
#include <sys/exec.h>
#include <sys/kcore.h>

#include <stddef.h>
#include <errno.h>
#include <ctype.h>
#include <db.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <nlist.h>
#include <paths.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <kvm.h>
#include <stdarg.h>

#include "kvm_private.h"

extern int __fdnlist(int, struct nlist *);

static int	kvm_dbopen(kvm_t *, const char *);
static int	kvm_opennamelist(kvm_t *, const char *);
static int	_kvm_get_header(kvm_t *);
static kvm_t	*_kvm_open(kvm_t *, const char *, const char *, const char *,
		     int, char *);
static int	clear_gap(kvm_t *, FILE *, int);

char *
kvm_geterr(kvm_t *kd)
{
	return (kd->errbuf);
}

/*
 * Wrapper around pread.
 */
ssize_t
_kvm_pread(kvm_t *kd, int fd, void *buf, size_t nbytes, off_t offset)
{
	ssize_t rval;

	errno = 0;
	rval = pread(fd, buf, nbytes, offset);
	if (rval == -1 || errno != 0) {
		_kvm_syserr(kd, kd->program, "pread");
	}
	return (rval);
}

/*
 * Wrapper around pwrite.
 */
ssize_t
_kvm_pwrite(kvm_t *kd, int fd, const void *buf, size_t nbytes, off_t offset)
{
	ssize_t rval;

	errno = 0;
	rval = pwrite(fd, buf, nbytes, offset);
	if (rval == -1 || errno != 0) {
		_kvm_syserr(kd, kd->program, "pwrite");
	}
	return (rval);
}

/*
 * Report an error using printf style arguments.  "program" is kd->program
 * on hard errors, and 0 on soft errors, so that under sun error emulation,
 * only hard errors are printed out (otherwise, programs like gdb will
 * generate tons of error messages when trying to access bogus pointers).
 */
void
_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fputc('\n', stderr);
	} else
		(void)vsnprintf(kd->errbuf,
		    sizeof(kd->errbuf), fmt, ap);

	va_end(ap);
}
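
/*
 * Illustrative note (not part of the library): callers see soft errors
 * (program == NULL) only through kvm_geterr().  For example, a
 * hypothetical caller of kvm_read() below, with addr and val standing
 * in for its own variables, might report a failure as
 *
 *	if (kvm_read(kd, addr, &val, sizeof(val)) != sizeof(val))
 *		warnx("%s", kvm_geterr(kd));
 *
 * while hard errors (program != NULL) are printed to stderr right here.
 */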

void
_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;
	size_t n;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fprintf(stderr, ": %s\n", strerror(errno));
	} else {
		char *cp = kd->errbuf;

		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
		n = strlen(cp);
		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
		    strerror(errno));
	}
	va_end(ap);
}

void *
_kvm_malloc(kvm_t *kd, size_t n)
{
	void *p;

	if ((p = malloc(n)) == NULL)
		_kvm_err(kd, kd->program, "%s", strerror(errno));
	return (p);
}

void *
_kvm_realloc(kvm_t *kd, void *p, size_t n)
{
	if ((p = realloc(p, n)) == NULL)
		_kvm_err(kd, kd->program, "%s", strerror(errno));
	return (p);
}

static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, const char *sf,
    int flag, char *errout)
{
	struct stat st;

	kd->db = 0;
	kd->pmfd = -1;
	kd->vmfd = -1;
	kd->swfd = -1;
	kd->nlfd = -1;
	kd->alive = 0;
	kd->filebase = NULL;
	kd->procbase = NULL;
	kd->nbpg = getpagesize();
	kd->swapspc = 0;
	kd->argspc = 0;
	kd->argbuf = 0;
	kd->argv = 0;
	kd->envspc = 0;
	kd->envbuf = 0;
	kd->envp = 0;
	kd->vmst = NULL;
	kd->vm_page_buckets = 0;
	kd->kcore_hdr = 0;
	kd->cpu_dsize = 0;
	kd->cpu_data = 0;
	kd->dump_off = 0;

	if (flag & KVM_NO_FILES) {
		kd->alive = 1;
		return (kd);
	}

	if (uf && strlen(uf) >= PATH_MAX) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag != O_RDONLY && flag != O_WRONLY && flag != O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	flag |= O_CLOEXEC;

	if (mf == NULL)
		mf = _PATH_MEM;

	if ((kd->pmfd = open(mf, flag)) == -1) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) == -1) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_MEM) != 0) {	/* XXX */
			_kvm_err(kd, kd->program,
				 "%s: not physical memory device", mf);
			goto failed;
		}
		if ((kd->vmfd = open(_PATH_KMEM, flag)) == -1) {
			_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
			goto failed;
		}
		kd->alive = 1;
		if (sf != NULL && (kd->swfd = open(sf, flag)) == -1) {
			_kvm_syserr(kd, kd->program, "%s", sf);
			goto failed;
		}
		/*
		 * Open kvm nlist database.  We only try to use
		 * the pre-built database if the namelist file name
		 * pointer is NULL.  If the database cannot or should
		 * not be opened, open the namelist argument so we
		 * revert to slow nlist() calls.
		 * If no file is specified, try opening _PATH_KSYMS and
		 * fall back to _PATH_UNIX.
		 */
		if (kvm_dbopen(kd, uf ? uf : _PATH_UNIX) == -1 &&
		    kvm_opennamelist(kd, uf))
			goto failed;
	} else {
		/*
		 * This is a crash dump.
		 * Initialize the virtual address translation machinery,
		 * but first set up the namelist fd.
		 * If no file is specified, try opening _PATH_KSYMS and
		 * fall back to _PATH_UNIX.
		 */
		if (kvm_opennamelist(kd, uf))
			goto failed;

		/*
		 * If there is no valid core header, fail silently here.
		 * The address translations, however, will fail without a
		 * header.  Things can be made to run by calling
		 * kvm_dump_mkheader() before doing any translation.
		 */
		if (_kvm_get_header(kd) == 0) {
			if (_kvm_initvtop(kd) < 0)
				goto failed;
		}
	}
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != 0)
		(void)strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (0);
}

static int
kvm_opennamelist(kvm_t *kd, const char *uf)
{
	int fd;

	if (uf != NULL)
		fd = open(uf, O_RDONLY | O_CLOEXEC);
	else {
		fd = open(_PATH_KSYMS, O_RDONLY | O_CLOEXEC);
		uf = _PATH_UNIX;
		if (fd == -1)
			fd = open(uf, O_RDONLY | O_CLOEXEC);
	}
	if (fd == -1) {
		_kvm_syserr(kd, kd->program, "%s", uf);
		return (-1);
	}

	kd->nlfd = fd;
	return (0);
}

/*
 * The kernel dump file (from savecore) contains:
 *    kcore_hdr_t kcore_hdr;
 *    kcore_seg_t cpu_hdr;
 *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
 *    kcore_seg_t mem_hdr;
 *    (memory)    mem_data; (size is mem_hdr.c_size)
 *
 * Note: khdr is padded to khdr.c_hdrsize;
 * cpu_hdr and mem_hdr are padded to khdr.c_seghdrsize
 */
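/*
 * Worked illustration of the layout above (the sizes are made up, only
 * the arithmetic matters): with c_hdrsize = 512 and c_seghdrsize = 32,
 *
 *    offset 0                           kcore_hdr
 *    offset 512                         cpu_hdr
 *    offset 512 + 32                    cpu_data (cpu_hdr.c_size bytes)
 *    offset 512 + 32 + cpu_hdr.c_size   mem_hdr
 *    offset 512 + 64 + cpu_hdr.c_size   mem_data
 *
 * which is exactly the offset arithmetic _kvm_get_header() performs below.
 */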
static int
_kvm_get_header(kvm_t *kd)
{
	kcore_hdr_t	kcore_hdr;
	kcore_seg_t	cpu_hdr;
	kcore_seg_t	mem_hdr;
	size_t		offset;
	ssize_t		sz;

	/*
	 * Read the kcore_hdr_t
	 */
	sz = _kvm_pread(kd, kd->pmfd, &kcore_hdr, sizeof(kcore_hdr), (off_t)0);
	if (sz != sizeof(kcore_hdr)) {
		return (-1);
	}

	/*
	 * Currently, we only support dump-files made by the current
	 * architecture...
	 */
	if ((CORE_GETMAGIC(kcore_hdr) != KCORE_MAGIC) ||
	    (CORE_GETMID(kcore_hdr) != MID_MACHINE))
		return (-1);

	/*
	 * Currently, we only support exactly 2 segments: cpu-segment
	 * and data-segment in exactly that order.
	 */
	if (kcore_hdr.c_nseg != 2)
		return (-1);

	/*
	 * Save away the kcore_hdr.  All errors after this
	 * should do a "goto fail" to deallocate things.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr));
	if (kd->kcore_hdr == NULL)
		goto fail;
	memcpy(kd->kcore_hdr, &kcore_hdr, sizeof(kcore_hdr));
	offset = kcore_hdr.c_hdrsize;

	/*
	 * Read the CPU segment header
	 */
	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)offset);
	if (sz != sizeof(cpu_hdr)) {
		goto fail;
	}

	if ((CORE_GETMAGIC(cpu_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(cpu_hdr) != CORE_CPU))
		goto fail;
	offset += kcore_hdr.c_seghdrsize;

	/*
	 * Read the CPU segment DATA.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, (size_t)cpu_hdr.c_size);
	if (kd->cpu_data == NULL)
		goto fail;

	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
	    (off_t)offset);
	if (sz != (ssize_t)cpu_hdr.c_size) {
		goto fail;
	}

	offset += cpu_hdr.c_size;

	/*
	 * Read the next segment header: data segment
	 */
	sz = _kvm_pread(kd, kd->pmfd, &mem_hdr, sizeof(mem_hdr), (off_t)offset);
	if (sz != sizeof(mem_hdr)) {
		goto fail;
	}

	offset += kcore_hdr.c_seghdrsize;

	if ((CORE_GETMAGIC(mem_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(mem_hdr) != CORE_DATA))
		goto fail;

	kd->dump_off = offset;
	return (0);

fail:
	free(kd->kcore_hdr);
	kd->kcore_hdr = NULL;
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}

	return (-1);
}

/*
 * The format while on the dump device is: (new format)
 *    kcore_seg_t cpu_hdr;
 *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
 *    kcore_seg_t mem_hdr;
 *    (memory)    mem_data; (size is mem_hdr.c_size)
 */
int
kvm_dump_mkheader(kvm_t *kd, off_t dump_off)
{
	kcore_seg_t	cpu_hdr;
	int	hdr_size;
	ssize_t sz;

	if (kd->kcore_hdr != NULL) {
		_kvm_err(kd, kd->program, "already has a dump header");
		return (-1);
	}
	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "don't use on live kernel");
		return (-1);
	}

	/*
	 * Validate new format crash dump
	 */
	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)dump_off);
	if (sz != sizeof(cpu_hdr)) {
		return (-1);
	}
	if ((CORE_GETMAGIC(cpu_hdr) != KCORE_MAGIC)
		|| (CORE_GETMID(cpu_hdr) != MID_MACHINE)) {
		_kvm_err(kd, 0, "invalid magic in cpu_hdr");
		return (-1);
	}
	hdr_size = _ALIGN(sizeof(cpu_hdr));

	/*
	 * Read the CPU segment.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, kd->cpu_dsize);
	if (kd->cpu_data == NULL)
		goto fail;

	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
	    (off_t)dump_off+hdr_size);
	if (sz != (ssize_t)cpu_hdr.c_size) {
		_kvm_err(kd, 0, "invalid size in cpu_hdr");
		goto fail;
	}
	hdr_size += kd->cpu_dsize;

	/*
	 * Leave phys mem pointer at beginning of memory data
	 */
	kd->dump_off = dump_off + hdr_size;
	errno = 0;
	if (lseek(kd->pmfd, kd->dump_off, SEEK_SET) != kd->dump_off && errno != 0) {
		_kvm_err(kd, 0, "invalid dump offset - lseek");
		goto fail;
	}

	/*
	 * Create a kcore_hdr.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr_t));
	if (kd->kcore_hdr == NULL)
		goto fail;

	kd->kcore_hdr->c_hdrsize    = _ALIGN(sizeof(kcore_hdr_t));
	kd->kcore_hdr->c_seghdrsize = _ALIGN(sizeof(kcore_seg_t));
	kd->kcore_hdr->c_nseg       = 2;
	CORE_SETMAGIC(*(kd->kcore_hdr), KCORE_MAGIC, MID_MACHINE,0);

	/*
	 * Now that we have a valid header, enable translations.
	 */
	if (_kvm_initvtop(kd) == 0)
		/* Success */
		return (hdr_size);

fail:
	free(kd->kcore_hdr);
	kd->kcore_hdr = NULL;
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}
	return (-1);
}

static int
clear_gap(kvm_t *kd, FILE *fp, int size)
{
	if (size <= 0) /* XXX - < 0 should never happen */
		return (0);
	while (size-- > 0) {
		if (fputc(0, fp) == EOF) {
			_kvm_syserr(kd, kd->program, "clear_gap");
			return (-1);
		}
	}
	return (0);
}

/*
 * Write the dump header info to 'fp'. Note that we can't use fseek(3) here
 * because 'fp' might be a file pointer obtained by zopen().
 */
int
kvm_dump_wrtheader(kvm_t *kd, FILE *fp, int dumpsize)
{
	kcore_seg_t	seghdr;
	long		offset;
	int		gap;

	if (kd->kcore_hdr == NULL || kd->cpu_data == NULL) {
		_kvm_err(kd, kd->program, "no valid dump header(s)");
		return (-1);
	}

	/*
	 * Write the generic header
	 */
	offset = 0;
	if (fwrite(kd->kcore_hdr, sizeof(kcore_hdr_t), 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_hdrsize;
	gap     = kd->kcore_hdr->c_hdrsize - sizeof(kcore_hdr_t);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the cpu header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_CPU);
	seghdr.c_size = (u_long)_ALIGN(kd->cpu_dsize);
	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	if (fwrite(kd->cpu_data, kd->cpu_dsize, 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += seghdr.c_size;
	gap     = seghdr.c_size - kd->cpu_dsize;
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the actual dump data segment header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_DATA);
	seghdr.c_size = dumpsize;
	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	return (offset);
}
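
/*
 * Usage sketch (illustrative only; the variable names are placeholders
 * and savecore(8) is the real consumer of these routines).  Turning a
 * raw dump, read through a kvm_t opened on the dump device, into a
 * savecore-style file looks roughly like
 *
 *	if (kvm_dump_mkheader(kd, dump_off) == -1)
 *		errx(1, "cannot make dump header");
 *	if (kvm_dump_wrtheader(kd, fp, dump_size) == -1)
 *		errx(1, "cannot write dump header");
 *
 * after which the memory image itself (dump_size bytes starting at
 * kd->dump_off) still has to be copied to fp.
 */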

kvm_t *
kvm_openfiles(const char *uf, const char *mf, const char *sf,
    int flag, char *errout)
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL) {
		(void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX);
		return (0);
	}
	kd->program = 0;
	return (_kvm_open(kd, uf, mf, sf, flag, errout));
}
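
/*
 * Usage sketch (illustrative, not part of the library): a typical
 * caller opens the running kernel with the default paths and checks
 * errbuf on failure, e.g.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd;
 *
 *	if ((kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf)) == NULL)
 *		errx(1, "kvm_openfiles: %s", errbuf);
 *	...
 *	kvm_close(kd);
 *
 * Passing NULL paths selects the defaults handled by _kvm_open() above:
 * _PATH_KSYMS (falling back to _PATH_UNIX) for the namelist and
 * _PATH_MEM for memory.
 */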

kvm_t *
kvm_open(const char *uf, const char *mf, const char *sf, int flag,
    const char *program)
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL) {
		if (program != NULL)
			(void)fprintf(stderr, "%s: %s\n", program,
			    strerror(errno));
		return (0);
	}
	kd->program = program;
	return (_kvm_open(kd, uf, mf, sf, flag, NULL));
}

int
kvm_close(kvm_t *kd)
{
	int error = 0;

	if (kd->pmfd >= 0)
		error |= close(kd->pmfd);
	if (kd->vmfd >= 0)
		error |= close(kd->vmfd);
	kd->alive = 0;
	if (kd->nlfd >= 0)
		error |= close(kd->nlfd);
	if (kd->swfd >= 0)
		error |= close(kd->swfd);
	if (kd->db != 0)
		error |= (kd->db->close)(kd->db);
	if (kd->vmst)
		_kvm_freevtop(kd);
	kd->cpu_dsize = 0;
	free(kd->cpu_data);
	free(kd->kcore_hdr);
	free(kd->filebase);
	free(kd->procbase);
	free(kd->swapspc);
	free(kd->argspc);
	free(kd->argbuf);
	free(kd->argv);
	free(kd->envspc);
	free(kd->envbuf);
	free(kd->envp);
	free(kd);

	return (error);
}
DEF(kvm_close);

/*
 * Set up state necessary to do queries on the kernel namelist
 * data base.  If the data base is out of date or incompatible with the
 * given executable, set up things so we revert to the standard nlist call.
 * Only called for live kernels.  Return 0 on success, -1 on failure.
 */
static int
kvm_dbopen(kvm_t *kd, const char *uf)
{
	char dbversion[_POSIX2_LINE_MAX], kversion[_POSIX2_LINE_MAX];
	char dbname[PATH_MAX], ufbuf[PATH_MAX];
	struct nlist nitem;
	size_t dbversionlen;
	DBT rec;

	strlcpy(ufbuf, uf, sizeof(ufbuf));
	uf = basename(ufbuf);

	(void)snprintf(dbname, sizeof(dbname), "%skvm_%s.db", _PATH_VARDB, uf);
	kd->db = dbopen(dbname, O_RDONLY, 0, DB_HASH, NULL);
	if (kd->db == NULL) {
		switch (errno) {
		case ENOENT:
			/* No kvm_bsd.db, fall back to /bsd silently */
			break;
		case EFTYPE:
			_kvm_err(kd, kd->program,
			    "file %s is incorrectly formatted", dbname);
			break;
		case EINVAL:
			_kvm_err(kd, kd->program,
			    "invalid argument to dbopen()");
			break;
		default:
			_kvm_err(kd, kd->program, "unknown dbopen() error");
			break;
		}
		return (-1);
	}

	/*
	 * read version out of database
	 */
	rec.data = VRS_KEY;
	rec.size = sizeof(VRS_KEY) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size > sizeof(dbversion))
		goto close;

	bcopy(rec.data, dbversion, rec.size);
	dbversionlen = rec.size;

	/*
	 * Read version string from kernel memory.
	 * Since we are dealing with a live kernel, we can call kvm_read()
	 * at this point.
	 */
	rec.data = VRS_SYM;
	rec.size = sizeof(VRS_SYM) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size != sizeof(struct nlist))
		goto close;
	bcopy(rec.data, &nitem, sizeof(nitem));
	if (kvm_read(kd, (u_long)nitem.n_value, kversion, dbversionlen) !=
	    dbversionlen)
		goto close;
	/*
	 * If they match, we win - otherwise clear out kd->db so
	 * we revert to slow nlist().
	 */
	if (bcmp(dbversion, kversion, dbversionlen) == 0)
		return (0);
close:
	(void)(kd->db->close)(kd->db);
	kd->db = 0;

	return (-1);
}
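
/*
 * For reference, the database records used above and in kvm_nlist()
 * below are: VRS_KEY mapping to the kernel version string, VRS_SYM
 * mapping to the struct nlist of the version symbol, and each symbol
 * name mapping to its full struct nlist entry.
 */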

int
kvm_nlist(kvm_t *kd, struct nlist *nl)
{
	struct nlist *p;
	int nvalid, rv;

	/*
	 * If we can't use the data base, revert to the
	 * slow library call.
	 */
	if (kd->db == 0) {
		rv = __fdnlist(kd->nlfd, nl);
		if (rv == -1)
			_kvm_err(kd, 0, "bad namelist");
		return (rv);
	}

	/*
	 * We can use the kvm data base.  Go through each nlist entry
	 * and look it up with a db query.
	 */
	nvalid = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		size_t len;
		DBT rec;

		if ((len = strlen(p->n_name)) > 4096) {
			/* sanity */
			_kvm_err(kd, kd->program, "symbol too large");
			return (-1);
		}
		rec.data = p->n_name;
		rec.size = len;

		/*
		 * Make sure that n_value = 0 when the symbol isn't found
		 */
		p->n_value = 0;

		if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
			continue;
		if (rec.data == 0 || rec.size != sizeof(struct nlist))
			continue;
		++nvalid;
		/*
		 * Avoid alignment issues.
		 */
		bcopy((char *)rec.data + offsetof(struct nlist, n_type),
		    &p->n_type, sizeof(p->n_type));
		bcopy((char *)rec.data + offsetof(struct nlist, n_value),
		    &p->n_value, sizeof(p->n_value));
	}
	/*
	 * Return the number of entries that weren't found.
	 */
	return ((p - nl) - nvalid);
}
DEF(kvm_nlist);
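
/*
 * Usage sketch (illustrative; "_nprocs" stands in for whatever symbol
 * the caller needs).  The array is terminated by a NULL name and the
 * return value counts the entries that could not be resolved:
 *
 *	struct nlist nl[2];
 *
 *	nl[0].n_name = "_nprocs";
 *	nl[1].n_name = NULL;
 *	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0)
 *		errx(1, "cannot find _nprocs");
 */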

int
kvm_dump_inval(kvm_t *kd)
{
	struct nlist	nl[2];
	u_long		x;
	paddr_t		pa;

	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "clearing dump on live kernel");
		return (-1);
	}
	nl[0].n_name = "_dumpmag";
	nl[1].n_name = NULL;

	if (kvm_nlist(kd, nl) == -1) {
		_kvm_err(kd, 0, "bad namelist");
		return (-1);
	}

	if (nl[0].n_value == 0) {
		_kvm_err(kd, nl[0].n_name, "not in name list");
		return (-1);
	}

	if (_kvm_kvatop(kd, (u_long)nl[0].n_value, &pa) == 0)
		return (-1);

	x = 0;
	if (_kvm_pwrite(kd, kd->pmfd, &x, sizeof(x),
	    (off_t)_kvm_pa2off(kd, pa)) != sizeof(x)) {
		_kvm_err(kd, 0, "cannot invalidate dump");
		return (-1);
	}
	return (0);
}

ssize_t
kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
{
	ssize_t cc;
	void *cp;

	if (ISALIVE(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		cc = _kvm_pread(kd, kd->vmfd, buf, len, (off_t)kva);
		if (cc == -1) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short read");
		return (cc);
	} else {
		if ((kd->kcore_hdr == NULL) || (kd->cpu_data == NULL)) {
			_kvm_err(kd, kd->program, "no valid dump header");
			return (-1);
		}
		cp = buf;
		while (len > 0) {
			paddr_t	pa;

			/* In case of error, _kvm_kvatop sets the err string */
			cc = _kvm_kvatop(kd, kva, &pa);
			if (cc == 0)
				return (-1);
			if (cc > len)
				cc = len;
			cc = _kvm_pread(kd, kd->pmfd, cp, (size_t)cc,
			    (off_t)_kvm_pa2off(kd, pa));
			if (cc == -1) {
				_kvm_syserr(kd, 0, _PATH_MEM);
				break;
			}
			/*
			 * If _kvm_kvatop returns a bogus value or our core
			 * file is truncated, we might wind up seeking beyond
			 * the end of the core file in which case the read will
			 * return 0 (EOF).
			 */
			if (cc == 0)
				break;
			cp = (char *)cp + cc;
			kva += cc;
			len -= cc;
		}
		return ((char *)cp - (char *)buf);
	}
	/* NOTREACHED */
}
DEF(kvm_read);
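
/*
 * Usage sketch (illustrative, continuing the kvm_nlist() example
 * above): once a symbol's address is known, its current value can be
 * pulled from the live kernel or the dump with kvm_read(), e.g.
 *
 *	int nprocs;
 *
 *	if (kvm_read(kd, nl[0].n_value, &nprocs, sizeof(nprocs)) !=
 *	    sizeof(nprocs))
 *		errx(1, "kvm_read: %s", kvm_geterr(kd));
 */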

ssize_t
kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
{
	int cc;

	if (ISALIVE(kd)) {
		/*
		 * Just like kvm_read, only we write.
		 */
		cc = _kvm_pwrite(kd, kd->vmfd, buf, len, (off_t)kva);
		if (cc == -1) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short write");
		return (cc);
	} else {
		_kvm_err(kd, kd->program,
		    "kvm_write not implemented for dead kernels");
		return (-1);
	}
	/* NOTREACHED */
}
925