/*	$OpenBSD: kvm.c,v 1.72 2022/02/22 17:35:01 deraadt Exp $ */
/*	$NetBSD: kvm.c,v 1.43 1996/05/05 04:31:59 gwr Exp $	*/

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>	/* MID_MACHINE */
#include <sys/types.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>

#include <sys/core.h>
#include <sys/exec.h>
#include <sys/kcore.h>

#include <stddef.h>
#include <errno.h>
#include <ctype.h>
#include <db.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <nlist.h>
#include <paths.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <kvm.h>
#include <stdarg.h>

#include "kvm_private.h"

extern int __fdnlist(int, struct nlist *);

static int	kvm_dbopen(kvm_t *, const char *);
static int	kvm_opennamelist(kvm_t *, const char *);
static int	_kvm_get_header(kvm_t *);
static kvm_t	*_kvm_open(kvm_t *, const char *, const char *, const char *,
		     int, char *);
static int	clear_gap(kvm_t *, FILE *, int);

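/*
 * Return the error string most recently recorded in the descriptor.
 * Only "soft" errors are stored here; "hard" errors are printed to
 * stderr by _kvm_err()/_kvm_syserr() instead.
 */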
char *
kvm_geterr(kvm_t *kd)
{
	return (kd->errbuf);
}

/*
 * Wrapper around pread.
 */
ssize_t
_kvm_pread(kvm_t *kd, int fd, void *buf, size_t nbytes, off_t offset)
{
	ssize_t rval;

	errno = 0;
	rval = pread(fd, buf, nbytes, offset);
	if (rval == -1 || errno != 0) {
		_kvm_syserr(kd, kd->program, "pread");
	}
	return (rval);
}

/*
 * Wrapper around pwrite.
 */
ssize_t
_kvm_pwrite(kvm_t *kd, int fd, const void *buf, size_t nbytes, off_t offset)
{
	ssize_t rval;

	errno = 0;
	rval = pwrite(fd, buf, nbytes, offset);
	if (rval == -1 || errno != 0) {
		_kvm_syserr(kd, kd->program, "pwrite");
	}
	return (rval);
}

/*
 * Report an error using printf style arguments.  "program" is kd->program
 * on hard errors, and 0 on soft errors, so that under sun error emulation,
 * only hard errors are printed out (otherwise, programs like gdb will
 * generate tons of error messages when trying to access bogus pointers).
 */
void
_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fputc('\n', stderr);
	} else
		(void)vsnprintf(kd->errbuf,
		    sizeof(kd->errbuf), fmt, ap);

	va_end(ap);
}

void
_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;
	size_t n;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fprintf(stderr, ": %s\n", strerror(errno));
	} else {
		char *cp = kd->errbuf;

		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
		n = strlen(cp);
		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
		    strerror(errno));
	}
	va_end(ap);
}

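/*
 * malloc(3) wrapper: on failure, record the error string in the
 * descriptor and return NULL.
 */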
void *
_kvm_malloc(kvm_t *kd, size_t n)
{
	void *p;

	if ((p = malloc(n)) == NULL)
		_kvm_err(kd, kd->program, "%s", strerror(errno));
	return (p);
}

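/*
 * realloc(3) wrapper with the same error-reporting convention as
 * _kvm_malloc().
 */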
void *
_kvm_realloc(kvm_t *kd, void *p, size_t n)
{
	if ((p = realloc(p, n)) == NULL)
		_kvm_err(kd, kd->program, "%s", strerror(errno));
	return (p);
}

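/*
 * Common back end for kvm_open() and kvm_openfiles(): initialize the
 * descriptor, then either open the live-kernel devices (memory, kmem,
 * optional swap and namelist files) or set up to read a crash dump.
 * On failure the error text is copied to "errout" (when one was given)
 * and 0 is returned.
 */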
static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, const char *sf,
    int flag, char *errout)
{
	struct stat st;

	kd->db = 0;
	kd->pmfd = -1;
	kd->vmfd = -1;
	kd->swfd = -1;
	kd->nlfd = -1;
	kd->alive = 0;
	kd->filebase = NULL;
	kd->procbase = NULL;
	kd->nbpg = getpagesize();
	kd->swapspc = 0;
	kd->argspc = 0;
	kd->argbuf = 0;
	kd->argv = 0;
	kd->envspc = 0;
	kd->envbuf = 0;
	kd->envp = 0;
	kd->vmst = NULL;
	kd->vm_page_buckets = 0;
	kd->kcore_hdr = 0;
	kd->cpu_dsize = 0;
	kd->cpu_data = 0;
	kd->dump_off = 0;

	if (flag & KVM_NO_FILES) {
		kd->alive = 1;
		return (kd);
	}

	if (uf && strlen(uf) >= PATH_MAX) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag != O_RDONLY && flag != O_WRONLY && flag != O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	flag |= O_CLOEXEC;

	if (mf == NULL)
		mf = _PATH_MEM;

	if ((kd->pmfd = open(mf, flag)) == -1) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) == -1) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_MEM) != 0) {	/* XXX */
			_kvm_err(kd, kd->program,
				 "%s: not physical memory device", mf);
			goto failed;
		}
		if ((kd->vmfd = open(_PATH_KMEM, flag)) == -1) {
			_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
			goto failed;
		}
		kd->alive = 1;
		if (sf != NULL && (kd->swfd = open(sf, flag)) == -1) {
			_kvm_syserr(kd, kd->program, "%s", sf);
			goto failed;
		}
		/*
		 * Open kvm nlist database.  We only try to use
		 * the pre-built database if the namelist file name
		 * pointer is NULL.  If the database cannot or should
		 * not be opened, open the namelist argument so we
		 * revert to slow nlist() calls.
		 * If no file is specified, try opening _PATH_KSYMS and
		 * fall back to _PATH_UNIX.
		 */
		if (kvm_dbopen(kd, uf ? uf : _PATH_UNIX) == -1 &&
		    kvm_opennamelist(kd, uf))
			goto failed;
	} else {
		/*
		 * This is a crash dump.
		 * Initialize the virtual address translation machinery,
		 * but first setup the namelist fd.
		 * If no file is specified, try opening _PATH_KSYMS and
		 * fall back to _PATH_UNIX.
		 */
		if (kvm_opennamelist(kd, uf))
			goto failed;

		/*
		 * If there is no valid core header, fail silently here.
		 * The address translations however will fail without
		 * header. Things can be made to run by calling
		 * kvm_dump_mkheader() before doing any translation.
		 */
		if (_kvm_get_header(kd) == 0) {
			if (_kvm_initvtop(kd) < 0)
				goto failed;
		}
	}
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != 0)
		(void)strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (0);
}

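/*
 * Open the file the namelist will be read from: the caller-supplied
 * path if there is one, otherwise _PATH_KSYMS with _PATH_UNIX as the
 * fallback.
 */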
static int
kvm_opennamelist(kvm_t *kd, const char *uf)
{
	int fd;

	if (uf != NULL)
		fd = open(uf, O_RDONLY | O_CLOEXEC);
	else {
		fd = open(_PATH_KSYMS, O_RDONLY | O_CLOEXEC);
		uf = _PATH_UNIX;
		if (fd == -1)
			fd = open(uf, O_RDONLY | O_CLOEXEC);
	}
	if (fd == -1) {
		_kvm_syserr(kd, kd->program, "%s", uf);
		return (-1);
	}

	kd->nlfd = fd;
	return (0);
}

/*
 * The kernel dump file (from savecore) contains:
 *    kcore_hdr_t kcore_hdr;
 *    kcore_seg_t cpu_hdr;
 *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
 *    kcore_seg_t mem_hdr;
 *    (memory)    mem_data; (size is mem_hdr.c_size)
 *
 * Note: khdr is padded to khdr.c_hdrsize;
 * cpu_hdr and mem_hdr are padded to khdr.c_seghdrsize
 */
static int
_kvm_get_header(kvm_t *kd)
{
	kcore_hdr_t	kcore_hdr;
	kcore_seg_t	cpu_hdr;
	kcore_seg_t	mem_hdr;
	size_t		offset;
	ssize_t		sz;

	/*
	 * Read the kcore_hdr_t
	 */
	sz = _kvm_pread(kd, kd->pmfd, &kcore_hdr, sizeof(kcore_hdr), (off_t)0);
	if (sz != sizeof(kcore_hdr)) {
		return (-1);
	}

	/*
	 * Currently, we only support dump-files made by the current
	 * architecture...
	 */
	if ((CORE_GETMAGIC(kcore_hdr) != KCORE_MAGIC) ||
	    (CORE_GETMID(kcore_hdr) != MID_MACHINE))
		return (-1);

	/*
	 * Currently, we only support exactly 2 segments: cpu-segment
	 * and data-segment in exactly that order.
	 */
	if (kcore_hdr.c_nseg != 2)
		return (-1);

	/*
	 * Save away the kcore_hdr.  All errors after this point
	 * should do a "goto fail" to deallocate things.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr));
	if (kd->kcore_hdr == NULL)
		goto fail;
	memcpy(kd->kcore_hdr, &kcore_hdr, sizeof(kcore_hdr));
	offset = kcore_hdr.c_hdrsize;

	/*
	 * Read the CPU segment header
	 */
	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)offset);
	if (sz != sizeof(cpu_hdr)) {
		goto fail;
	}

	if ((CORE_GETMAGIC(cpu_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(cpu_hdr) != CORE_CPU))
		goto fail;
	offset += kcore_hdr.c_seghdrsize;

	/*
	 * Read the CPU segment DATA.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, (size_t)cpu_hdr.c_size);
	if (kd->cpu_data == NULL)
		goto fail;

	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
	    (off_t)offset);
	if (sz != (size_t)cpu_hdr.c_size) {
		goto fail;
	}

	offset += cpu_hdr.c_size;

	/*
	 * Read the next segment header: data segment
	 */
	sz = _kvm_pread(kd, kd->pmfd, &mem_hdr, sizeof(mem_hdr), (off_t)offset);
	if (sz != sizeof(mem_hdr)) {
		goto fail;
	}

	offset += kcore_hdr.c_seghdrsize;

	if ((CORE_GETMAGIC(mem_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(mem_hdr) != CORE_DATA))
		goto fail;

	kd->dump_off = offset;
	return (0);

fail:
	free(kd->kcore_hdr);
	kd->kcore_hdr = NULL;
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}

	return (-1);
}

/*
 * The format while on the dump device is: (new format)
 *    kcore_seg_t cpu_hdr;
 *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
 *    kcore_seg_t mem_hdr;
 *    (memory)    mem_data; (size is mem_hdr.c_size)
 */
int
kvm_dump_mkheader(kvm_t *kd, off_t dump_off)
{
	kcore_seg_t	cpu_hdr;
	int	hdr_size;
	ssize_t sz;

	if (kd->kcore_hdr != NULL) {
	    _kvm_err(kd, kd->program, "already has a dump header");
	    return (-1);
	}
	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "don't use on live kernel");
		return (-1);
	}

	/*
	 * Validate new format crash dump
	 */
	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)dump_off);
	if (sz != sizeof(cpu_hdr)) {
		return (-1);
	}
	if ((CORE_GETMAGIC(cpu_hdr) != KCORE_MAGIC)
		|| (CORE_GETMID(cpu_hdr) != MID_MACHINE)) {
		_kvm_err(kd, 0, "invalid magic in cpu_hdr");
		return (-1);
	}
	hdr_size = _ALIGN(sizeof(cpu_hdr));

	/*
	 * Read the CPU segment.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, kd->cpu_dsize);
	if (kd->cpu_data == NULL)
		goto fail;

	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
	    (off_t)dump_off+hdr_size);
	if (sz != (ssize_t)cpu_hdr.c_size) {
		_kvm_err(kd, 0, "invalid size in cpu_hdr");
		goto fail;
	}
	hdr_size += kd->cpu_dsize;

	/*
	 * Leave phys mem pointer at beginning of memory data
	 */
	kd->dump_off = dump_off + hdr_size;
	errno = 0;
	if (lseek(kd->pmfd, kd->dump_off, SEEK_SET) != kd->dump_off && errno != 0) {
		_kvm_err(kd, 0, "invalid dump offset - lseek");
		goto fail;
	}

	/*
	 * Create a kcore_hdr.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr_t));
	if (kd->kcore_hdr == NULL)
		goto fail;

	kd->kcore_hdr->c_hdrsize    = _ALIGN(sizeof(kcore_hdr_t));
	kd->kcore_hdr->c_seghdrsize = _ALIGN(sizeof(kcore_seg_t));
	kd->kcore_hdr->c_nseg       = 2;
	CORE_SETMAGIC(*(kd->kcore_hdr), KCORE_MAGIC, MID_MACHINE,0);

	/*
	 * Now that we have a valid header, enable translations.
	 */
	if (_kvm_initvtop(kd) == 0)
		/* Success */
		return (hdr_size);

fail:
	free(kd->kcore_hdr);
	kd->kcore_hdr = NULL;
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}
	return (-1);
}

static int
clear_gap(kvm_t *kd, FILE *fp, int size)
{
	if (size <= 0) /* XXX - < 0 should never happen */
		return (0);
	while (size-- > 0) {
		if (fputc(0, fp) == EOF) {
			_kvm_syserr(kd, kd->program, "clear_gap");
			return (-1);
		}
	}
	return (0);
}

/*
 * Write the dump header info to 'fp'. Note that we can't use fseek(3) here
 * because 'fp' might be a file pointer obtained by zopen().
 */
int
kvm_dump_wrtheader(kvm_t *kd, FILE *fp, int dumpsize)
{
	kcore_seg_t	seghdr;
	long		offset;
	int		gap;

	if (kd->kcore_hdr == NULL || kd->cpu_data == NULL) {
		_kvm_err(kd, kd->program, "no valid dump header(s)");
		return (-1);
	}

	/*
	 * Write the generic header
	 */
	offset = 0;
	if (fwrite(kd->kcore_hdr, sizeof(kcore_hdr_t), 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_hdrsize;
	gap     = kd->kcore_hdr->c_hdrsize - sizeof(kcore_hdr_t);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the cpu header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_CPU);
	seghdr.c_size = (u_long)_ALIGN(kd->cpu_dsize);
	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	if (fwrite(kd->cpu_data, kd->cpu_dsize, 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += seghdr.c_size;
	gap     = seghdr.c_size - kd->cpu_dsize;
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the actual dump data segment header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_DATA);
	seghdr.c_size = dumpsize;
	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	return (offset);
}

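/*
 * Open a kvm descriptor with BSD-style error handling: on failure the
 * error text is copied into the caller-supplied "errout" buffer (which
 * must hold at least _POSIX2_LINE_MAX bytes) rather than printed.
 * Passing NULL for the executable, memory, and swap files selects the
 * running kernel and _PATH_MEM.
 *
 * Illustrative caller sketch (an assumption about typical use, not part
 * of this library):
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *	if (kd == NULL)
 *		errx(1, "kvm_openfiles: %s", errbuf);
 *	... kvm_nlist()/kvm_read() against the running kernel ...
 *	(void)kvm_close(kd);
 */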
kvm_t *
kvm_openfiles(const char *uf, const char *mf, const char *sf,
    int flag, char *errout)
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL) {
		(void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX);
		return (0);
	}
	kd->program = 0;
	return (_kvm_open(kd, uf, mf, sf, flag, errout));
}

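/*
 * Open a kvm descriptor with Sun-style error handling: errors are
 * printed to stderr, prefixed with "program", instead of being
 * returned to the caller.
 */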
kvm_t *
kvm_open(const char *uf, const char *mf, const char *sf, int flag,
    const char *program)
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL) {
		if (program != NULL)
			(void)fprintf(stderr, "%s: %s\n", program,
			    strerror(errno));
		return (0);
	}
	kd->program = program;
	return (_kvm_open(kd, uf, mf, sf, flag, NULL));
}

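/*
 * Close all open file descriptors and the namelist database, release
 * the per-descriptor buffers, and free the descriptor itself.
 */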
int
kvm_close(kvm_t *kd)
{
	int error = 0;

	if (kd->pmfd >= 0)
		error |= close(kd->pmfd);
	if (kd->vmfd >= 0)
		error |= close(kd->vmfd);
	kd->alive = 0;
	if (kd->nlfd >= 0)
		error |= close(kd->nlfd);
	if (kd->swfd >= 0)
		error |= close(kd->swfd);
	if (kd->db != 0)
		error |= (kd->db->close)(kd->db);
	if (kd->vmst)
		_kvm_freevtop(kd);
	kd->cpu_dsize = 0;
	free(kd->cpu_data);
	free(kd->kcore_hdr);
	free(kd->filebase);
	free(kd->procbase);
	free(kd->swapspc);
	free(kd->argspc);
	free(kd->argbuf);
	free(kd->argv);
	free(kd->envspc);
	free(kd->envbuf);
	free(kd->envp);
	free(kd);

	return (error);
}
DEF(kvm_close);

/*
 * Set up state necessary to do queries on the kernel namelist
 * data base.  If the data base is out-of-date or incompatible with the
 * given executable, set up things so we revert to standard nlist call.
 * Only called for live kernels.  Return 0 on success, -1 on failure.
 */
static int
kvm_dbopen(kvm_t *kd, const char *uf)
{
	char dbversion[_POSIX2_LINE_MAX], kversion[_POSIX2_LINE_MAX];
	char dbname[PATH_MAX], ufbuf[PATH_MAX];
	struct nlist nitem;
	size_t dbversionlen;
	DBT rec;

	strlcpy(ufbuf, uf, sizeof(ufbuf));
	uf = basename(ufbuf);

	(void)snprintf(dbname, sizeof(dbname), "%skvm_%s.db", _PATH_VARDB, uf);
	kd->db = dbopen(dbname, O_RDONLY, 0, DB_HASH, NULL);
	if (kd->db == NULL) {
		switch (errno) {
		case ENOENT:
			/* No kvm_bsd.db, fall back to /bsd silently */
			break;
		case EFTYPE:
			_kvm_err(kd, kd->program,
			    "file %s is incorrectly formatted", dbname);
			break;
		case EINVAL:
			_kvm_err(kd, kd->program,
			    "invalid argument to dbopen()");
			break;
		default:
			_kvm_err(kd, kd->program, "unknown dbopen() error");
			break;
		}
		return (-1);
	}

	/*
	 * read version out of database
	 */
	rec.data = VRS_KEY;
	rec.size = sizeof(VRS_KEY) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size > sizeof(dbversion))
		goto close;

	bcopy(rec.data, dbversion, rec.size);
	dbversionlen = rec.size;

	/*
	 * Read version string from kernel memory.
	 * Since we are dealing with a live kernel, we can call kvm_read()
	 * at this point.
	 */
	rec.data = VRS_SYM;
	rec.size = sizeof(VRS_SYM) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size != sizeof(struct nlist))
		goto close;
	bcopy(rec.data, &nitem, sizeof(nitem));
	if (kvm_read(kd, (u_long)nitem.n_value, kversion, dbversionlen) !=
	    dbversionlen)
		goto close;
	/*
	 * If they match, we win - otherwise clear out kd->db so
	 * we revert to slow nlist().
	 */
	if (bcmp(dbversion, kversion, dbversionlen) == 0)
		return (0);
close:
	(void)(kd->db->close)(kd->db);
	kd->db = 0;

	return (-1);
}

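/*
 * Look up the given symbols: via the kvm database when it is open,
 * otherwise through the slower __fdnlist() path.  Returns the number
 * of symbols that were not found, or -1 on error.
 */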
int
kvm_nlist(kvm_t *kd, struct nlist *nl)
{
	struct nlist *p;
	int nvalid, rv;

	/*
	 * If we can't use the data base, revert to the
	 * slow library call.
	 */
	if (kd->db == 0) {
		rv = __fdnlist(kd->nlfd, nl);
		if (rv == -1)
			_kvm_err(kd, 0, "bad namelist");
		return (rv);
	}

	/*
	 * We can use the kvm data base.  Go through each nlist entry
	 * and look it up with a db query.
	 */
	nvalid = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		size_t len;
		DBT rec;

		if ((len = strlen(p->n_name)) > 4096) {
			/* sanity */
			_kvm_err(kd, kd->program, "symbol too large");
			return (-1);
		}
		rec.data = p->n_name;
		rec.size = len;

		/*
		 * Make sure that n_value = 0 when the symbol isn't found
		 */
		p->n_value = 0;

		if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
			continue;
		if (rec.data == 0 || rec.size != sizeof(struct nlist))
			continue;
		++nvalid;
		/*
		 * Avoid alignment issues.
		 */
		bcopy((char *)rec.data + offsetof(struct nlist, n_type),
		    &p->n_type, sizeof(p->n_type));
		bcopy((char *)rec.data + offsetof(struct nlist, n_value),
		    &p->n_value, sizeof(p->n_value));
	}
	/*
	 * Return the number of entries that weren't found.
	 */
	return ((p - nl) - nvalid);
}
DEF(kvm_nlist);

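/*
 * Invalidate a crash dump by zeroing the saved copy of the kernel's
 * dump magic number ("_dumpmag") in the dump image.  Only makes sense
 * on a dead kernel.
 */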
int
kvm_dump_inval(kvm_t *kd)
{
	struct nlist	nl[2];
	u_long		x;
	paddr_t		pa;

	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "clearing dump on live kernel");
		return (-1);
	}
	nl[0].n_name = "_dumpmag";
	nl[1].n_name = NULL;

	if (kvm_nlist(kd, nl) == -1) {
		_kvm_err(kd, 0, "bad namelist");
		return (-1);
	}

	if (nl[0].n_value == 0) {
		_kvm_err(kd, nl[0].n_name, "not in name list");
		return (-1);
	}

	if (_kvm_kvatop(kd, (u_long)nl[0].n_value, &pa) == 0)
		return (-1);

	x = 0;
	if (_kvm_pwrite(kd, kd->pmfd, &x, sizeof(x),
	    (off_t)_kvm_pa2off(kd, pa)) != sizeof(x)) {
		_kvm_err(kd, 0, "cannot invalidate dump");
		return (-1);
	}
	return (0);
}

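/*
 * Read "len" bytes of kernel virtual memory starting at "kva" into "buf".
 * On a live kernel this reads /dev/kmem directly; on a crash dump each
 * address is first translated to a file offset with _kvm_kvatop().
 * Returns the number of bytes transferred, or -1 on error.
 */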
ssize_t
kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
{
	ssize_t cc;
	void *cp;

	if (ISALIVE(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		cc = _kvm_pread(kd, kd->vmfd, buf, len, (off_t)kva);
		if (cc == -1) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short read");
		return (cc);
	} else {
		if ((kd->kcore_hdr == NULL) || (kd->cpu_data == NULL)) {
			_kvm_err(kd, kd->program, "no valid dump header");
			return (-1);
		}
		cp = buf;
		while (len > 0) {
			paddr_t	pa;

			/* In case of error, _kvm_kvatop sets the err string */
			cc = _kvm_kvatop(kd, kva, &pa);
			if (cc == 0)
				return (-1);
			if (cc > len)
				cc = len;
			cc = _kvm_pread(kd, kd->pmfd, cp, (size_t)cc,
			    (off_t)_kvm_pa2off(kd, pa));
			if (cc == -1) {
				_kvm_syserr(kd, 0, _PATH_MEM);
				break;
			}
			/*
			 * If kvm_kvatop returns a bogus value or our core
			 * file is truncated, we might wind up seeking beyond
			 * the end of the core file in which case the read will
			 * return 0 (EOF).
			 */
			if (cc == 0)
				break;
			cp = (char *)cp + cc;
			kva += cc;
			len -= cc;
		}
		return ((char *)cp - (char *)buf);
	}
	/* NOTREACHED */
}
DEF(kvm_read);

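/*
 * Write "len" bytes from "buf" to kernel virtual address "kva".
 * Only supported on a live kernel (via /dev/kmem).
 */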
ssize_t
kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
{
	ssize_t cc;

	if (ISALIVE(kd)) {
		/*
		 * Just like kvm_read, only we write.
		 */
		cc = _kvm_pwrite(kd, kd->vmfd, buf, len, (off_t)kva);
		if (cc == -1) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short write");
		return (cc);
	} else {
		_kvm_err(kd, kd->program,
		    "kvm_write not implemented for dead kernels");
		return (-1);
	}
	/* NOTREACHED */
}