1 /*	$OpenBSD: kvm.c,v 1.49 2011/03/12 04:54:28 guenther Exp $ */
2 /*	$NetBSD: kvm.c,v 1.43 1996/05/05 04:31:59 gwr Exp $	*/
3 
4 /*-
5  * Copyright (c) 1989, 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software developed by the Computer Systems
9  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
10  * BG 91-66 and contributed to Berkeley.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 
37 #include <sys/param.h>
38 #include <sys/user.h>
39 #include <sys/proc.h>
40 #include <sys/ioctl.h>
41 #include <sys/stat.h>
42 #include <sys/sysctl.h>
43 
44 #include <sys/core.h>
45 #include <sys/exec_aout.h>
46 #include <sys/kcore.h>
47 
48 #include <ctype.h>
49 #include <db.h>
50 #include <fcntl.h>
51 #include <libgen.h>
52 #include <limits.h>
53 #include <nlist.h>
54 #include <paths.h>
55 #include <stdio.h>
56 #include <stdlib.h>
57 #include <string.h>
58 #include <unistd.h>
59 #include <kvm.h>
60 #include <stdarg.h>
61 
62 #include "kvm_private.h"
63 
64 extern int __fdnlist(int, struct nlist *);
65 
66 static int	kvm_dbopen(kvm_t *, const char *);
67 static int	_kvm_get_header(kvm_t *);
68 static kvm_t	*_kvm_open(kvm_t *, const char *, const char *, const char *,
69 		     int, char *);
70 static int	clear_gap(kvm_t *, FILE *, int);
71 static int	kvm_setfd(kvm_t *);
72 
73 char *
74 kvm_geterr(kd)
75 	kvm_t *kd;
76 {
77 	return (kd->errbuf);
78 }
79 
80 /*
81  * Wrapper around pread.
82  */
83 ssize_t
84 _kvm_pread(kvm_t *kd, int fd, void *buf, size_t nbytes, off_t offset)
85 {
86 	ssize_t rval;
87 
88 	errno = 0;
89 	rval = pread(fd, buf, nbytes, offset);
90 	if (rval == -1 || errno != 0) {
91 		_kvm_syserr(kd, kd->program, "pread");
92 	}
93 	return (rval);
94 }
95 
96 /*
97  * Wrapper around pwrite.
98  */
99 ssize_t
100 _kvm_pwrite(kvm_t *kd, int fd, const void *buf, size_t nbytes, off_t offset)
101 {
102 	ssize_t rval;
103 
104 	errno = 0;
105 	rval = pwrite(fd, buf, nbytes, offset);
106 	if (rval == -1 || errno != 0) {
107 		_kvm_syserr(kd, kd->program, "pwrite");
108 	}
109 	return (rval);
110 }
111 
112 /*
113  * Report an error using printf style arguments.  "program" is kd->program
114  * on hard errors, and 0 on soft errors, so that under sun error emulation,
115  * only hard errors are printed out (otherwise, programs like gdb will
116  * generate tons of error messages when trying to access bogus pointers).
117  */
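/*
 * For example, kvm_read() below reports a short read from the live
 * kernel as a hard error:
 *
 *	_kvm_err(kd, kd->program, "short read");
 *
 * but reports a probe of a bad address only as a soft error recorded
 * in kd->errbuf:
 *
 *	_kvm_err(kd, 0, "invalid address (%lx)", kva);
 */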
118 void
119 _kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
120 {
121 	va_list ap;
122 
123 	va_start(ap, fmt);
124 	if (program != NULL) {
125 		(void)fprintf(stderr, "%s: ", program);
126 		(void)vfprintf(stderr, fmt, ap);
127 		(void)fputc('\n', stderr);
128 	} else
129 		(void)vsnprintf(kd->errbuf,
130 		    sizeof(kd->errbuf), fmt, ap);
131 
132 	va_end(ap);
133 }
134 
135 void
136 _kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
137 {
138 	va_list ap;
139 	size_t n;
140 
141 	va_start(ap, fmt);
142 	if (program != NULL) {
143 		(void)fprintf(stderr, "%s: ", program);
144 		(void)vfprintf(stderr, fmt, ap);
145 		(void)fprintf(stderr, ": %s\n", strerror(errno));
146 	} else {
147 		char *cp = kd->errbuf;
148 
149 		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
150 		n = strlen(cp);
151 		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
152 		    strerror(errno));
153 	}
154 	va_end(ap);
155 }
156 
157 void *
158 _kvm_malloc(kvm_t *kd, size_t n)
159 {
160 	void *p;
161 
162 	if ((p = malloc(n)) == NULL)
163 		_kvm_err(kd, kd->program, "%s", strerror(errno));
164 	return (p);
165 }
166 
167 static kvm_t *
168 _kvm_open(kvm_t *kd, const char *uf, const char *mf, const char *sf,
169     int flag, char *errout)
170 {
171 	struct stat st;
172 
173 	kd->db = 0;
174 	kd->pmfd = -1;
175 	kd->vmfd = -1;
176 	kd->swfd = -1;
177 	kd->nlfd = -1;
178 	kd->alive = 0;
179 	kd->filebase = 0;
180 	kd->procbase = 0;
181 	kd->nbpg = getpagesize();
182 	kd->swapspc = 0;
183 	kd->argspc = 0;
184 	kd->argbuf = 0;
185 	kd->argv = 0;
186 	kd->vmst = NULL;
187 	kd->vm_page_buckets = 0;
188 	kd->kcore_hdr = 0;
189 	kd->cpu_dsize = 0;
190 	kd->cpu_data = 0;
191 	kd->dump_off = 0;
192 
193 	if (flag & KVM_NO_FILES) {
194 		kd->alive = 1;
195 		return (kd);
196 	}
197 
198 	if (uf && strlen(uf) >= MAXPATHLEN) {
199 		_kvm_err(kd, kd->program, "exec file name too long");
200 		goto failed;
201 	}
202 	if (flag & ~O_ACCMODE) {
203 		_kvm_err(kd, kd->program, "bad flags arg");
204 		goto failed;
205 	}
206 	if (mf == 0)
207 		mf = _PATH_MEM;
208 
209 	if ((kd->pmfd = open(mf, flag, 0)) < 0) {
210 		_kvm_syserr(kd, kd->program, "%s", mf);
211 		goto failed;
212 	}
213 	if (fstat(kd->pmfd, &st) < 0) {
214 		_kvm_syserr(kd, kd->program, "%s", mf);
215 		goto failed;
216 	}
217 	if (S_ISCHR(st.st_mode)) {
218 		/*
219 		 * If this is a character special device, then check that
220 		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
221 		 * make it work for either /dev/mem or /dev/kmem -- in either
222 		 * case you're working with a live kernel.)
223 		 */
224 		if (strcmp(mf, _PATH_MEM) != 0) {	/* XXX */
225 			_kvm_err(kd, kd->program,
226 				 "%s: not physical memory device", mf);
227 			goto failed;
228 		}
229 		if ((kd->vmfd = open(_PATH_KMEM, flag, 0)) < 0) {
230 			_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
231 			goto failed;
232 		}
233 		kd->alive = 1;
234 		if (sf != NULL && (kd->swfd = open(sf, flag, 0)) < 0) {
235 			_kvm_syserr(kd, kd->program, "%s", sf);
236 			goto failed;
237 		}
238 		/*
239 		 * Open the kvm nlist database, named after the given
240 		 * kernel file (_PATH_UNIX if none was given).  If the
241 		 * database cannot or should not be used, open the
242 		 * namelist file itself so we revert to slow nlist()
243 		 * calls.
244 		 * If no file is specified, try opening _PATH_KSYMS and
245 		 * fall back to _PATH_UNIX.
246 		 */
247 		if (kvm_dbopen(kd, uf ? uf : _PATH_UNIX) == -1 &&
248 		    ((uf && (kd->nlfd = open(uf, O_RDONLY, 0)) == -1) || (!uf &&
249 		    (kd->nlfd = open((uf = _PATH_KSYMS), O_RDONLY, 0)) == -1 &&
250 		    (kd->nlfd = open((uf = _PATH_UNIX), O_RDONLY, 0)) == -1))) {
251 			_kvm_syserr(kd, kd->program, "%s", uf);
252 			goto failed;
253 		}
254 	} else {
255 		/*
256 		 * This is a crash dump.
257 		 * Initialize the virtual address translation machinery,
258 		 * but first setup the namelist fd.
259 		 * If no file is specified, try opening _PATH_KSYMS and
260 		 * fall back to _PATH_UNIX.
261 		 */
262 		if ((uf && (kd->nlfd = open(uf, O_RDONLY, 0)) == -1) || (!uf &&
263 		    (kd->nlfd = open((uf = _PATH_KSYMS), O_RDONLY, 0)) == -1 &&
264 		    (kd->nlfd = open((uf = _PATH_UNIX), O_RDONLY, 0)) == -1)) {
265 			_kvm_syserr(kd, kd->program, "%s", uf);
266 			goto failed;
267 		}
268 
269 		/*
270 		 * If there is no valid core header, fail silently here.
271 		 * The address translations, however, will fail without
272 		 * a header.  Things can be made to run by calling
273 		 * kvm_dump_mkheader() before doing any translation.
274 		 */
275 		if (_kvm_get_header(kd) == 0) {
276 			if (_kvm_initvtop(kd) < 0)
277 				goto failed;
278 		}
279 	}
280 	if (kvm_setfd(kd) == 0)
281 		return (kd);
282 	else
283 		_kvm_syserr(kd, kd->program, "can't set close on exec flag");
284 failed:
285 	/*
286 	 * Copy out the error if doing sane error semantics.
287 	 */
288 	if (errout != 0)
289 		(void)strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
290 	(void)kvm_close(kd);
291 	return (0);
292 }
293 
294 /*
295  * The kernel dump file (from savecore) contains:
296  *    kcore_hdr_t kcore_hdr;
297  *    kcore_seg_t cpu_hdr;
298  *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
299  *    kcore_seg_t mem_hdr;
300  *    (memory)    mem_data; (size is mem_hdr.c_size)
301  *
302  * Note: khdr is padded to khdr.c_hdrsize;
303  * cpu_hdr and mem_hdr are padded to khdr.c_seghdrsize
304  */
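/*
 * The resulting file offsets, as computed by _kvm_get_header() below:
 *
 *	kcore_hdr	at 0
 *	cpu_hdr		at c_hdrsize
 *	cpu_data	at c_hdrsize + c_seghdrsize
 *	mem_hdr		at c_hdrsize + c_seghdrsize + cpu_hdr.c_size
 *	mem_data	at c_hdrsize + 2 * c_seghdrsize + cpu_hdr.c_size
 *			(this last offset is saved in kd->dump_off)
 */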
305 static int
306 _kvm_get_header(kvm_t *kd)
307 {
308 	kcore_hdr_t	kcore_hdr;
309 	kcore_seg_t	cpu_hdr;
310 	kcore_seg_t	mem_hdr;
311 	size_t		offset;
312 	ssize_t		sz;
313 
314 	/*
315 	 * Read the kcore_hdr_t
316 	 */
317 	sz = _kvm_pread(kd, kd->pmfd, &kcore_hdr, sizeof(kcore_hdr), (off_t)0);
318 	if (sz != sizeof(kcore_hdr)) {
319 		return (-1);
320 	}
321 
322 	/*
323 	 * Currently, we only support dump-files made by the current
324 	 * architecture...
325 	 */
326 	if ((CORE_GETMAGIC(kcore_hdr) != KCORE_MAGIC) ||
327 	    (CORE_GETMID(kcore_hdr) != MID_MACHINE))
328 		return (-1);
329 
330 	/*
331 	 * Currently, we only support exactly 2 segments: cpu-segment
332 	 * and data-segment in exactly that order.
333 	 */
334 	if (kcore_hdr.c_nseg != 2)
335 		return (-1);
336 
337 	/*
338 	 * Save away the kcore_hdr.  All errors after this
339 	 * should do a "goto fail" to deallocate things.
340 	 */
341 	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr));
342 	if (kd->kcore_hdr == NULL)
343 		goto fail;
344 	memcpy(kd->kcore_hdr, &kcore_hdr, sizeof(kcore_hdr));
345 	offset = kcore_hdr.c_hdrsize;
346 
347 	/*
348 	 * Read the CPU segment header
349 	 */
350 	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)offset);
351 	if (sz != sizeof(cpu_hdr)) {
352 		goto fail;
353 	}
354 
355 	if ((CORE_GETMAGIC(cpu_hdr) != KCORESEG_MAGIC) ||
356 	    (CORE_GETFLAG(cpu_hdr) != CORE_CPU))
357 		goto fail;
358 	offset += kcore_hdr.c_seghdrsize;
359 
360 	/*
361 	 * Read the CPU segment DATA.
362 	 */
363 	kd->cpu_dsize = cpu_hdr.c_size;
364 	kd->cpu_data = _kvm_malloc(kd, (size_t)cpu_hdr.c_size);
365 	if (kd->cpu_data == NULL)
366 		goto fail;
367 
368 	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
369 	    (off_t)offset);
370 	if (sz != (size_t)cpu_hdr.c_size) {
371 		goto fail;
372 	}
373 
374 	offset += cpu_hdr.c_size;
375 
376 	/*
377 	 * Read the next segment header: data segment
378 	 */
379 	sz = _kvm_pread(kd, kd->pmfd, &mem_hdr, sizeof(mem_hdr), (off_t)offset);
380 	if (sz != sizeof(mem_hdr)) {
381 		goto fail;
382 	}
383 
384 	offset += kcore_hdr.c_seghdrsize;
385 
386 	if ((CORE_GETMAGIC(mem_hdr) != KCORESEG_MAGIC) ||
387 	    (CORE_GETFLAG(mem_hdr) != CORE_DATA))
388 		goto fail;
389 
390 	kd->dump_off = offset;
391 	return (0);
392 
393 fail:
394 	if (kd->kcore_hdr != NULL) {
395 		free(kd->kcore_hdr);
396 		kd->kcore_hdr = NULL;
397 	}
398 	if (kd->cpu_data != NULL) {
399 		free(kd->cpu_data);
400 		kd->cpu_data = NULL;
401 		kd->cpu_dsize = 0;
402 	}
403 
404 	return (-1);
405 }
406 
407 /*
408  * The format while on the dump device is: (new format)
409  *    kcore_seg_t cpu_hdr;
410  *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
411  *    kcore_seg_t mem_hdr;
412  *    (memory)    mem_data; (size is mem_hdr.c_size)
413  */
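/*
 * This is the dump file layout above minus the leading kcore_hdr_t;
 * kvm_dump_mkheader() synthesizes that header so the normal address
 * translation code can be used on a raw dump device as well.
 */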
414 int
415 kvm_dump_mkheader(kvm_t *kd, off_t dump_off)
416 {
417 	kcore_seg_t	cpu_hdr;
418 	int	hdr_size;
419 	ssize_t sz;
420 
421 	if (kd->kcore_hdr != NULL) {
422 	    _kvm_err(kd, kd->program, "already has a dump header");
423 	    return (-1);
424 	}
425 	if (ISALIVE(kd)) {
426 		_kvm_err(kd, kd->program, "don't use on live kernel");
427 		return (-1);
428 	}
429 
430 	/*
431 	 * Validate new format crash dump
432 	 */
433 	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)dump_off);
434 	if (sz != sizeof(cpu_hdr)) {
435 		return (-1);
436 	}
437 	if ((CORE_GETMAGIC(cpu_hdr) != KCORE_MAGIC)
438 		|| (CORE_GETMID(cpu_hdr) != MID_MACHINE)) {
439 		_kvm_err(kd, 0, "invalid magic in cpu_hdr");
440 		return (-1);
441 	}
442 	hdr_size = ALIGN(sizeof(cpu_hdr));
443 
444 	/*
445 	 * Read the CPU segment.
446 	 */
447 	kd->cpu_dsize = cpu_hdr.c_size;
448 	kd->cpu_data = _kvm_malloc(kd, kd->cpu_dsize);
449 	if (kd->cpu_data == NULL)
450 		goto fail;
451 
452 	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
453 	    (off_t)dump_off+hdr_size);
454 	if (sz != (ssize_t)cpu_hdr.c_size) {
455 		_kvm_err(kd, 0, "invalid size in cpu_hdr");
456 		goto fail;
457 	}
458 	hdr_size += kd->cpu_dsize;
459 
460 	/*
461 	 * Leave phys mem pointer at beginning of memory data
462 	 */
463 	kd->dump_off = dump_off + hdr_size;
464 	errno = 0;
465 	if (lseek(kd->pmfd, kd->dump_off, SEEK_SET) != kd->dump_off && errno != 0) {
466 		_kvm_err(kd, 0, "invalid dump offset - lseek");
467 		goto fail;
468 	}
469 
470 	/*
471 	 * Create a kcore_hdr.
472 	 */
473 	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr_t));
474 	if (kd->kcore_hdr == NULL)
475 		goto fail;
476 
477 	kd->kcore_hdr->c_hdrsize    = ALIGN(sizeof(kcore_hdr_t));
478 	kd->kcore_hdr->c_seghdrsize = ALIGN(sizeof(kcore_seg_t));
479 	kd->kcore_hdr->c_nseg       = 2;
480 	CORE_SETMAGIC(*(kd->kcore_hdr), KCORE_MAGIC, MID_MACHINE, 0);
481 
482 	/*
483 	 * Now that we have a valid header, enable translations.
484 	 */
485 	if (_kvm_initvtop(kd) == 0)
486 		/* Success */
487 		return (hdr_size);
488 
489 fail:
490 	if (kd->kcore_hdr != NULL) {
491 		free(kd->kcore_hdr);
492 		kd->kcore_hdr = NULL;
493 	}
494 	if (kd->cpu_data != NULL) {
495 		free(kd->cpu_data);
496 		kd->cpu_data = NULL;
497 		kd->cpu_dsize = 0;
498 	}
499 	return (-1);
500 }
501 
502 static int
503 clear_gap(kvm_t *kd, FILE *fp, int size)
504 {
505 	if (size <= 0) /* XXX - < 0 should never happen */
506 		return (0);
507 	while (size-- > 0) {
508 		if (fputc(0, fp) == EOF) {
509 			_kvm_syserr(kd, kd->program, "clear_gap");
510 			return (-1);
511 		}
512 	}
513 	return (0);
514 }
515 
516 /*
517  * Write the dump header info to 'fp'. Note that we can't use fseek(3) here
518  * because 'fp' might be a file pointer obtained by zopen().
519  */
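/*
 * The output follows the savecore file layout described above
 * _kvm_get_header(): the generic header, the cpu segment header and
 * data, and the data segment header, each zero-padded via clear_gap()
 * to the sizes the headers record.  The return value is the number of
 * header bytes written; the caller is expected to append the dumpsize
 * bytes of memory data itself.
 */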
520 int
521 kvm_dump_wrtheader(kvm_t *kd, FILE *fp, int dumpsize)
522 {
523 	kcore_seg_t	seghdr;
524 	long		offset;
525 	int		gap;
526 
527 	if (kd->kcore_hdr == NULL || kd->cpu_data == NULL) {
528 		_kvm_err(kd, kd->program, "no valid dump header(s)");
529 		return (-1);
530 	}
531 
532 	/*
533 	 * Write the generic header
534 	 */
535 	offset = 0;
536 	if (fwrite(kd->kcore_hdr, sizeof(kcore_hdr_t), 1, fp) < 1) {
537 		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
538 		return (-1);
539 	}
540 	offset += kd->kcore_hdr->c_hdrsize;
541 	gap     = kd->kcore_hdr->c_hdrsize - sizeof(kcore_hdr_t);
542 	if (clear_gap(kd, fp, gap) == -1)
543 		return (-1);
544 
545 	/*
546 	 * Write the cpu header
547 	 */
548 	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_CPU);
549 	seghdr.c_size = (u_long)ALIGN(kd->cpu_dsize);
550 	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
551 		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
552 		return (-1);
553 	}
554 	offset += kd->kcore_hdr->c_seghdrsize;
555 	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
556 	if (clear_gap(kd, fp, gap) == -1)
557 		return (-1);
558 
559 	if (fwrite(kd->cpu_data, kd->cpu_dsize, 1, fp) < 1) {
560 		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
561 		return (-1);
562 	}
563 	offset += seghdr.c_size;
564 	gap     = seghdr.c_size - kd->cpu_dsize;
565 	if (clear_gap(kd, fp, gap) == -1)
566 		return (-1);
567 
568 	/*
569 	 * Write the actual dump data segment header
570 	 */
571 	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_DATA);
572 	seghdr.c_size = dumpsize;
573 	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
574 		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
575 		return (-1);
576 	}
577 	offset += kd->kcore_hdr->c_seghdrsize;
578 	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
579 	if (clear_gap(kd, fp, gap) == -1)
580 		return (-1);
581 
582 	return (offset);
583 }
584 
585 kvm_t *
586 kvm_openfiles(const char *uf, const char *mf, const char *sf,
587     int flag, char *errout)
588 {
589 	kvm_t *kd;
590 
591 	if ((kd = malloc(sizeof(*kd))) == NULL) {
592 		(void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX);
593 		return (0);
594 	}
595 	kd->program = 0;
596 	return (_kvm_open(kd, uf, mf, sf, flag, errout));
597 }
598 
599 kvm_t *
600 kvm_open(const char *uf, const char *mf, const char *sf, int flag,
601     const char *program)
602 {
603 	kvm_t *kd;
604 
605 	if ((kd = malloc(sizeof(*kd))) == NULL) {
606 		if (program != NULL)
607 			(void)fprintf(stderr, "%s: %s\n", program, strerror(errno));
608 		return (0);
609 	}
610 	kd->program = program;
611 	return (_kvm_open(kd, uf, mf, sf, flag, NULL));
612 }
612 
613 int
614 kvm_close(kvm_t *kd)
615 {
616 	int error = 0;
617 
618 	if (kd->pmfd >= 0)
619 		error |= close(kd->pmfd);
620 	if (kd->vmfd >= 0)
621 		error |= close(kd->vmfd);
622 	kd->alive = 0;
623 	if (kd->nlfd >= 0)
624 		error |= close(kd->nlfd);
625 	if (kd->swfd >= 0)
626 		error |= close(kd->swfd);
627 	if (kd->db != 0)
628 		error |= (kd->db->close)(kd->db);
629 	if (kd->vmst)
630 		_kvm_freevtop(kd);
631 	kd->cpu_dsize = 0;
632 	if (kd->cpu_data != NULL)
633 		free((void *)kd->cpu_data);
634 	if (kd->kcore_hdr != NULL)
635 		free((void *)kd->kcore_hdr);
636 	if (kd->filebase != 0)
637 		free((void *)kd->filebase);
638 	if (kd->procbase != 0)
639 		free((void *)kd->procbase);
640 	if (kd->swapspc != 0)
641 		free((void *)kd->swapspc);
642 	if (kd->argspc != 0)
643 		free((void *)kd->argspc);
644 	if (kd->argbuf != 0)
645 		free((void *)kd->argbuf);
646 	if (kd->argv != 0)
647 		free((void *)kd->argv);
648 	free((void *)kd);
649 
650 	return (error);
651 }
652 
653 /*
654  * Set up state necessary to do queries on the kernel namelist
655  * data base.  If the data base is out-of-date or incompatible with the
656  * given executable, set things up so we revert to the standard nlist call.
657  * Only called for live kernels.  Return 0 on success, -1 on failure.
658  */
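/*
 * The database maps each symbol name to its struct nlist record; the
 * key VRS_KEY holds a copy of the kernel version string and VRS_SYM
 * the nlist entry of the kernel's version symbol, so the database can
 * be checked against the running kernel below.
 */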
659 static int
660 kvm_dbopen(kvm_t *kd, const char *uf)
661 {
662 	char dbversion[_POSIX2_LINE_MAX], kversion[_POSIX2_LINE_MAX];
663 	char dbname[MAXPATHLEN];
664 	struct nlist nitem;
665 	size_t dbversionlen;
666 	DBT rec;
667 
668 	uf = basename(uf);
669 
670 	(void)snprintf(dbname, sizeof(dbname), "%skvm_%s.db", _PATH_VARDB, uf);
671 	kd->db = dbopen(dbname, O_RDONLY, 0, DB_HASH, NULL);
672 	if (kd->db == NULL) {
673 		switch (errno) {
674 		case ENOENT:
675 			/* No kvm_bsd.db, fall back to /bsd silently */
676 			break;
677 		case EFTYPE:
678 			_kvm_err(kd, kd->program,
679 			    "file %s is incorrectly formatted", dbname);
680 			break;
681 		case EINVAL:
682 			_kvm_err(kd, kd->program,
683 			    "invalid argument to dbopen()");
684 			break;
685 		default:
686 			_kvm_err(kd, kd->program, "unknown dbopen() error");
687 			break;
688 		}
689 		return (-1);
690 	}
691 
692 	/*
693 	 * read version out of database
694 	 */
695 	rec.data = VRS_KEY;
696 	rec.size = sizeof(VRS_KEY) - 1;
697 	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
698 		goto close;
699 	if (rec.data == 0 || rec.size > sizeof(dbversion))
700 		goto close;
701 
702 	bcopy(rec.data, dbversion, rec.size);
703 	dbversionlen = rec.size;
704 
705 	/*
706 	 * Read version string from kernel memory.
707 	 * Since we are dealing with a live kernel, we can call kvm_read()
708 	 * at this point.
709 	 */
710 	rec.data = VRS_SYM;
711 	rec.size = sizeof(VRS_SYM) - 1;
712 	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
713 		goto close;
714 	if (rec.data == 0 || rec.size != sizeof(struct nlist))
715 		goto close;
716 	bcopy(rec.data, &nitem, sizeof(nitem));
717 	if (kvm_read(kd, (u_long)nitem.n_value, kversion, dbversionlen) !=
718 	    dbversionlen)
719 		goto close;
720 	/*
721 	 * If they match, we win - otherwise clear out kd->db so
722 	 * we revert to slow nlist().
723 	 */
724 	if (bcmp(dbversion, kversion, dbversionlen) == 0)
725 		return (0);
726 close:
727 	(void)(kd->db->close)(kd->db);
728 	kd->db = 0;
729 
730 	return (-1);
731 }
732 
733 int
734 kvm_nlist(kvm_t *kd, struct nlist *nl)
735 {
736 	struct nlist *p;
737 	int nvalid, rv;
738 
739 	/*
740 	 * If we can't use the data base, revert to the
741 	 * slow library call.
742 	 */
743 	if (kd->db == 0) {
744 		rv = __fdnlist(kd->nlfd, nl);
745 		if (rv == -1)
746 			_kvm_err(kd, 0, "bad namelist");
747 		return (rv);
748 	}
749 
750 	/*
751 	 * We can use the kvm data base.  Go through each nlist entry
752 	 * and look it up with a db query.
753 	 */
754 	nvalid = 0;
755 	for (p = nl; p->n_name && p->n_name[0]; ++p) {
756 		size_t len;
757 		DBT rec;
758 
759 		if ((len = strlen(p->n_name)) > 4096) {
760 			/* sanity */
761 			_kvm_err(kd, kd->program, "symbol too large");
762 			return (-1);
763 		}
764 		rec.data = p->n_name;
765 		rec.size = len;
766 
767 		/*
768 		 * Make sure that n_value = 0 when the symbol isn't found
769 		 */
770 		p->n_value = 0;
771 
772 		if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
773 			continue;
774 		if (rec.data == 0 || rec.size != sizeof(struct nlist))
775 			continue;
776 		++nvalid;
777 		/*
778 		 * Avoid alignment issues.
779 		 */
780 		bcopy(&((struct nlist *)rec.data)->n_type,
781 		    &p->n_type, sizeof(p->n_type));
782 		bcopy(&((struct nlist *)rec.data)->n_value,
783 		    &p->n_value, sizeof(p->n_value));
784 	}
785 	/*
786 	 * Return the number of entries that weren't found.
787 	 */
788 	return ((p - nl) - nvalid);
789 }
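
/*
 * Typical use of the interfaces above (an illustrative sketch only;
 * "_placeholder" stands for a real kernel symbol and error handling
 * is minimal).  Needs <fcntl.h>, <kvm.h>, <limits.h>, <nlist.h> and
 * <err.h>:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct nlist nl[2];
 *	u_long value;
 *	kvm_t *kd;
 *
 *	if ((kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf)) == NULL)
 *		errx(1, "kvm_openfiles: %s", errbuf);
 *	nl[0].n_name = "_placeholder";
 *	nl[1].n_name = NULL;
 *	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0)
 *		errx(1, "%s not found", nl[0].n_name);
 *	if (kvm_read(kd, nl[0].n_value, &value, sizeof(value)) != sizeof(value))
 *		errx(1, "kvm_read: %s", kvm_geterr(kd));
 *	kvm_close(kd);
 */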
790 
791 int
792 kvm_dump_inval(kvm_t *kd)
793 {
794 	struct nlist	nl[2];
795 	u_long		x;
796 	paddr_t		pa;
797 
798 	if (ISALIVE(kd)) {
799 		_kvm_err(kd, kd->program, "clearing dump on live kernel");
800 		return (-1);
801 	}
802 	nl[0].n_name = "_dumpmag";
803 	nl[1].n_name = NULL;
804 
805 	if (kvm_nlist(kd, nl) == -1) {
806 		_kvm_err(kd, 0, "bad namelist");
807 		return (-1);
808 	}
809 
810 	if (nl[0].n_value == 0) {
811 		_kvm_err(kd, nl[0].n_name, "not in name list");
812 		return (-1);
813 	}
814 
815 	if (_kvm_kvatop(kd, (u_long)nl[0].n_value, &pa) == 0)
816 		return (-1);
817 
818 	x = 0;
819 	if (_kvm_pwrite(kd, kd->pmfd, &x, sizeof(x),
820 	    (off_t)_kvm_pa2off(kd, pa)) != sizeof(x)) {
821 		_kvm_err(kd, 0, "cannot invalidate dump");
822 		return (-1);
823 	}
824 	return (0);
825 }
826 
827 ssize_t
828 kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
829 {
830 	ssize_t cc;
831 	void *cp;
832 
833 	if (ISALIVE(kd)) {
834 		/*
835 		 * We're using /dev/kmem.  Just read straight from the
836 		 * device and let the active kernel do the address translation.
837 		 */
838 		cc = _kvm_pread(kd, kd->vmfd, buf, len, (off_t)kva);
839 		if (cc == -1) {
840 			_kvm_err(kd, 0, "invalid address (%lx)", kva);
841 			return (-1);
842 		} else if (cc < len)
843 			_kvm_err(kd, kd->program, "short read");
844 		return (cc);
845 	} else {
846 		if ((kd->kcore_hdr == NULL) || (kd->cpu_data == NULL)) {
847 			_kvm_err(kd, kd->program, "no valid dump header");
848 			return (-1);
849 		}
850 		cp = buf;
851 		while (len > 0) {
852 			paddr_t	pa;
853 
854 			/* In case of error, _kvm_kvatop sets the err string */
855 			cc = _kvm_kvatop(kd, kva, &pa);
856 			if (cc == 0)
857 				return (-1);
858 			if (cc > len)
859 				cc = len;
860 			cc = _kvm_pread(kd, kd->pmfd, cp, (size_t)cc,
861 			    (off_t)_kvm_pa2off(kd, pa));
862 			if (cc == -1) {
863 				_kvm_syserr(kd, 0, _PATH_MEM);
864 				break;
865 			}
866 			/*
867 			 * If kvm_kvatop returns a bogus value or our core
868 			 * file is truncated, we might wind up seeking beyond
869 			 * the end of the core file in which case the read will
870 			 * return 0 (EOF).
871 			 */
872 			if (cc == 0)
873 				break;
874 			cp = (char *)cp + cc;
875 			kva += cc;
876 			len -= cc;
877 		}
878 		return ((char *)cp - (char *)buf);
879 	}
880 	/* NOTREACHED */
881 }
882 
883 ssize_t
884 kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
885 {
886 	ssize_t cc;
887 
888 	if (ISALIVE(kd)) {
889 		/*
890 		 * Just like kvm_read, only we write.
891 		 */
892 		cc = _kvm_pwrite(kd, kd->vmfd, buf, len, (off_t)kva);
893 		if (cc == -1) {
894 			_kvm_err(kd, 0, "invalid address (%lx)", kva);
895 			return (-1);
896 		} else if (cc < len)
897 			_kvm_err(kd, kd->program, "short write");
898 		return (cc);
899 	} else {
900 		_kvm_err(kd, kd->program,
901 		    "kvm_write not implemented for dead kernels");
902 		return (-1);
903 	}
904 	/* NOTREACHED */
905 }
906 
907 static int
908 kvm_setfd(kvm_t *kd)
909 {
910 	if (kd->pmfd >= 0 && fcntl(kd->pmfd, F_SETFD, FD_CLOEXEC) < 0)
911 		return (-1);
912 	if (kd->vmfd >= 0 && fcntl(kd->vmfd, F_SETFD, FD_CLOEXEC) < 0)
913 		return (-1);
914 	if (kd->nlfd >= 0 && fcntl(kd->nlfd, F_SETFD, FD_CLOEXEC) < 0)
915 		return (-1);
916 	if (kd->swfd >= 0 && fcntl(kd->swfd, F_SETFD, FD_CLOEXEC) < 0)
917 		return (-1);
918 
919 	return (0);
920 }
921