xref: /dragonfly/lib/libkvm/kvm.c (revision d600454b)
1 /*-
2  * Copyright (c) 1989, 1992, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software developed by the Computer Systems
6  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
7  * BG 91-66 and contributed to Berkeley.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by the University of
20  *	California, Berkeley and its contributors.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  * @(#)kvm.c	8.2 (Berkeley) 2/13/94
38  * $FreeBSD: src/lib/libkvm/kvm.c,v 1.12.2.3 2002/09/13 14:53:43 nectar Exp $
39  * $DragonFly: src/lib/libkvm/kvm.c,v 1.8 2006/01/11 01:12:59 corecode Exp $
40  */
41 
#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>

#include <machine/vmparam.h>

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <paths.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>

#include "kvm_private.h"
69 
70 /* from src/lib/libc/gen/nlist.c */
71 int __fdnlist		(int, struct nlist *);
72 
73 char *
74 kvm_geterr(kvm_t *kd)
75 {
76 	return (kd->errbuf);
77 }
78 
79 /*
80  * Report an error using printf style arguments.  "program" is kd->program
81  * on hard errors, and 0 on soft errors, so that under sun error emulation,
82  * only hard errors are printed out (otherwise, programs like gdb will
83  * generate tons of error messages when trying to access bogus pointers).
84  */
85 void
86 _kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
87 {
88 	va_list ap;
89 
90 	va_start(ap, fmt);
91 	if (program != NULL) {
92 		(void)fprintf(stderr, "%s: ", program);
93 		(void)vfprintf(stderr, fmt, ap);
94 		(void)fputc('\n', stderr);
95 	} else
96 		(void)vsnprintf(kd->errbuf,
97 		    sizeof(kd->errbuf), (char *)fmt, ap);
98 
99 	va_end(ap);
100 }
101 
102 void
103 _kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
104 {
105 	va_list ap;
106 	int n;
107 
108 	va_start(ap, fmt);
109 	if (program != NULL) {
110 		(void)fprintf(stderr, "%s: ", program);
111 		(void)vfprintf(stderr, fmt, ap);
112 		(void)fprintf(stderr, ": %s\n", strerror(errno));
113 	} else {
114 		char *cp = kd->errbuf;
115 
116 		(void)vsnprintf(cp, sizeof(kd->errbuf), (char *)fmt, ap);
117 		n = strlen(cp);
118 		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
119 		    strerror(errno));
120 	}
121 	va_end(ap);
122 }
123 
124 void *
125 _kvm_malloc(kvm_t *kd, size_t n)
126 {
127 	void *p;
128 
129 	if ((p = calloc(n, sizeof(char))) == NULL)
130 		_kvm_err(kd, kd->program, "can't allocate %u bytes: %s",
131 			 n, strerror(errno));
132 	return (p);
133 }
134 
/*
 * Common open path for kvm_open() and kvm_openfiles(): validate the
 * arguments, open the memory file "mf" and, depending on whether it is
 * a character device (live kernel) or a regular file (crash dump),
 * open /dev/kmem or the namelist file "uf" plus the address-translation
 * machinery.  On failure the error text is copied to "errout" (when
 * non-NULL), the partially-built descriptor is torn down via
 * kvm_close(), and 0 is returned.
 */
static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout)
{
	struct stat st;

	/* Mark every resource as "not yet acquired" so kvm_close() is safe. */
	kd->vmfd = -1;
	kd->pmfd = -1;
	kd->nlfd = -1;
	kd->vmst = 0;
	kd->procbase = 0;
	kd->argspc = 0;
	kd->argv = 0;

	if (uf == 0)
		uf = getbootfile();
	else if (strlen(uf) >= MAXPATHLEN) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	/* Only O_RDONLY/O_WRONLY/O_RDWR bits are meaningful here. */
	if (flag & ~O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	if (mf == 0)
		mf = _PATH_MEM;

	if ((kd->pmfd = open(mf, flag, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	/* Don't let the descriptors leak across exec. */
	if (fcntl(kd->pmfd, F_SETFD, FD_CLOEXEC) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_DEVNULL) == 0) {
			kd->vmfd = open(_PATH_DEVNULL, O_RDONLY);
		} else if (strcmp(mf, _PATH_MEM) != 0) {
			_kvm_err(kd, kd->program,
				 "%s: not physical memory device", mf);
			goto failed;
		} else {
			if ((kd->vmfd = open(_PATH_KMEM, flag)) < 0) {
				_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
				goto failed;
			}
			if (fcntl(kd->vmfd, F_SETFD, FD_CLOEXEC) < 0) {
				_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
				goto failed;
			}
		}
	} else {
		/*
		 * This is a crash dump.
		 * Initialize the virtual address translation machinery,
		 * but first setup the namelist fd.
		 */
		if ((kd->nlfd = open(uf, O_RDONLY, 0)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", uf);
			goto failed;
		}
		if (fcntl(kd->nlfd, F_SETFD, FD_CLOEXEC) < 0) {
			_kvm_syserr(kd, kd->program, "%s", uf);
			goto failed;
		}
		if (_kvm_initvtop(kd) < 0)
			goto failed;
	}
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != 0)
		strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (0);
}
223 
224 kvm_t *
225 kvm_openfiles(const char *uf, const char *mf, const char *sf, int flag,
226 	      char *errout)
227 {
228 	kvm_t *kd;
229 
230 	if ((kd = malloc(sizeof(*kd))) == NULL) {
231 		(void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX);
232 		return (0);
233 	}
234 	memset(kd, 0, sizeof(*kd));
235 	kd->program = 0;
236 	return (_kvm_open(kd, uf, mf, flag, errout));
237 }
238 
239 kvm_t *
240 kvm_open(const char *uf, const char *mf, const char *sf, int flag,
241 	 const char *errstr)
242 {
243 	kvm_t *kd;
244 
245 	if ((kd = malloc(sizeof(*kd))) == NULL) {
246 		if (errstr != NULL)
247 			(void)fprintf(stderr, "%s: %s\n",
248 				      errstr, strerror(errno));
249 		return (0);
250 	}
251 	memset(kd, 0, sizeof(*kd));
252 	kd->program = errstr;
253 	return (_kvm_open(kd, uf, mf, flag, NULL));
254 }
255 
256 int
257 kvm_close(kvm_t *kd)
258 {
259 	int error = 0;
260 
261 	if (kd->pmfd >= 0)
262 		error |= close(kd->pmfd);
263 	if (kd->vmfd >= 0)
264 		error |= close(kd->vmfd);
265 	if (kd->nlfd >= 0)
266 		error |= close(kd->nlfd);
267 	if (kd->vmst)
268 		_kvm_freevtop(kd);
269 	if (kd->procbase != 0)
270 		free((void *)kd->procbase);
271 	if (kd->argv != 0)
272 		free((void *)kd->argv);
273 	free((void *)kd);
274 
275 	return (0);
276 }
277 
/*
 * Look up the nlist entries in "nl" (terminated by a NULL or empty
 * n_name).  On a live kernel each symbol is resolved with the
 * kldsym(2) syscall; on a dead kernel the namelist file is searched
 * with __fdnlist().  Returns the number of entries that were NOT
 * found (0 means every symbol resolved).
 */
int
kvm_nlist(kvm_t *kd, struct nlist *nl)
{
	struct nlist *p;
	int nvalid;
	struct kld_sym_lookup lookup;

	/*
	 * If we can't use the kld symbol lookup, revert to the
	 * slow library call.
	 */
	if (!ISALIVE(kd))
		return (__fdnlist(kd->nlfd, nl));

	/*
	 * We can use the kld lookup syscall.  Go through each nlist entry
	 * and look it up with a kldsym(2) syscall.
	 */
	nvalid = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		lookup.version = sizeof(lookup);
		lookup.symname = p->n_name;
		lookup.symvalue = 0;
		lookup.symsize = 0;

		/*
		 * Skip a leading underscore; kldsym(2) presumably wants
		 * the unadorned symbol name -- TODO confirm against the
		 * kldsym(2) manual page.
		 */
		if (lookup.symname[0] == '_')
			lookup.symname++;

		if (kldsym(0, KLDSYM_LOOKUP, &lookup) != -1) {
			/* Found: fill in a minimal nlist entry. */
			p->n_type = N_TEXT;
			p->n_other = 0;
			p->n_desc = 0;
			p->n_value = lookup.symvalue;
			++nvalid;
			/* lookup.symsize */
		}
	}
	/*
	 * Return the number of entries that weren't found.
	 */
	return ((p - nl) - nvalid);
}
320 
/*
 * Read len bytes of kernel virtual memory starting at kva into buf.
 * On a live kernel the bytes are read straight through /dev/kmem; on
 * a crash dump each virtual address is first translated to a physical
 * offset into the core file with _kvm_kvatop().  Returns the number
 * of bytes actually copied, or -1 on a hard error.
 */
ssize_t
kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
{
	int cc;
	void *cp;

	if (ISALIVE(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		errno = 0;
		/* The literal 0 whence argument is SEEK_SET. */
		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
			_kvm_err(kd, 0, "invalid address (%x)", kva);
			return (-1);
		}

		/*
		 * Try to pre-fault the user memory to reduce instances of
		 * races within the kernel.  XXX workaround for kernel bug
		 * where kernel does a sanity check, but user faults during
		 * the copy can block and race against another kernel entity
		 * unmapping the memory in question.
		 */
		bzero(buf, len);
		cc = read(kd->vmfd, buf, len);
		if (cc < 0) {
			_kvm_syserr(kd, 0, "kvm_read");
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short read");
		return (cc);
	} else {
		cp = buf;
		while (len > 0) {
			u_long pa;

			/*
			 * _kvm_kvatop() returns the number of contiguous
			 * bytes readable at pa; 0 means the translation
			 * failed (it records the error itself).
			 */
			cc = _kvm_kvatop(kd, kva, &pa);
			if (cc == 0)
				return (-1);
			if (cc > len)
				cc = len;
			errno = 0;
			if (lseek(kd->pmfd, (off_t)pa, 0) == -1 && errno != 0) {
				_kvm_syserr(kd, 0, _PATH_MEM);
				break;
			}
			/* Pre-fault/zero the destination; see comment above. */
			bzero(cp, cc);
			cc = read(kd->pmfd, cp, cc);
			if (cc < 0) {
				_kvm_syserr(kd, kd->program, "kvm_read");
				break;
			}
			/*
			 * If kvm_kvatop returns a bogus value or our core
			 * file is truncated, we might wind up seeking beyond
			 * the end of the core file in which case the read will
			 * return 0 (EOF).
			 */
			if (cc == 0)
				break;
			cp = (char *)cp + cc;
			kva += cc;
			len -= cc;
		}
		/* Partial progress before an error still counts as read. */
		return ((char *)cp - (char *)buf);
	}
	/* NOTREACHED */
}
390 
391 char *
392 kvm_readstr(kvm_t *kd, u_long kva, char *buf, size_t *lenp)
393 {
394 	size_t len, cc, pos;
395 	char ch;
396 	int asize = -1;
397 
398 	if (buf == NULL) {
399 		asize = len = 16;
400 		buf = malloc(len);
401 		if (buf == NULL) {
402 			_kvm_syserr(kd, kd->program, "kvm_readstr");
403 			return NULL;
404 		}
405 	} else {
406 		len = *lenp;
407 	}
408 
409 	if (ISALIVE(kd)) {
410 		/*
411 		 * We're using /dev/kmem.  Just read straight from the
412 		 * device and let the active kernel do the address translation.
413 		 */
414 		errno = 0;
415 		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
416 			_kvm_err(kd, 0, "invalid address (%x)", kva);
417 			return NULL;
418 		}
419 
420 		for (pos = 0, ch = -1; ch != 0; pos++) {
421 			cc = read(kd->vmfd, &ch, 1);
422 			if ((ssize_t)cc < 0) {
423 				_kvm_syserr(kd, 0, "kvm_readstr");
424 				return NULL;
425 			} else if (cc < 1)
426 				_kvm_err(kd, kd->program, "short read");
427 			if (pos == asize) {
428 				buf = realloc(buf, asize *= 2);
429 				if (buf == NULL) {
430 					_kvm_syserr(kd, kd->program, "kvm_readstr");
431 					return NULL;
432 				}
433 				len = asize;
434 			}
435 			if (pos < len)
436 				buf[pos] = ch;
437 		}
438 
439 		if (lenp != NULL)
440 			*lenp = pos;
441 		if (pos > len)
442 			return NULL;
443 		else
444 			return buf;
445 	} else {
446 		size_t left = 0;
447 		for (pos = 0, ch = -1; ch != 0; pos++, left--, kva++) {
448 			if (left == 0) {
449 				u_long pa;
450 
451 				left = _kvm_kvatop(kd, kva, &pa);
452 				if (left == 0)
453 					return NULL;
454 				errno = 0;
455 				if (lseek(kd->pmfd, (off_t)pa, 0) == -1 && errno != 0) {
456 					_kvm_syserr(kd, 0, _PATH_MEM);
457 					return NULL;
458 				}
459 			}
460 			cc = read(kd->vmfd, &ch, 1);
461 			if ((ssize_t)cc < 0) {
462 				_kvm_syserr(kd, 0, "kvm_readstr");
463 				return NULL;
464 			} else if (cc < 1)
465 				_kvm_err(kd, kd->program, "short read");
466 			if (pos == asize) {
467 				buf = realloc(buf, asize *= 2);
468 				if (buf == NULL) {
469 					_kvm_syserr(kd, kd->program, "kvm_readstr");
470 					return NULL;
471 				}
472 				len = asize;
473 			}
474 			if (pos < len)
475 				buf[pos] = ch;
476 		}
477 
478 		if (lenp != NULL)
479 			*lenp = pos;
480 		if (pos > len)
481 			return NULL;
482 		else
483 			return buf;
484 	}
485 	/* NOTREACHED */
486 }
487 
488 ssize_t
489 kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
490 {
491 	int cc;
492 
493 	if (ISALIVE(kd)) {
494 		/*
495 		 * Just like kvm_read, only we write.
496 		 */
497 		errno = 0;
498 		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
499 			_kvm_err(kd, 0, "invalid address (%x)", kva);
500 			return (-1);
501 		}
502 		cc = write(kd->vmfd, buf, len);
503 		if (cc < 0) {
504 			_kvm_syserr(kd, 0, "kvm_write");
505 			return (-1);
506 		} else if (cc < len)
507 			_kvm_err(kd, kd->program, "short write");
508 		return (cc);
509 	} else {
510 		_kvm_err(kd, kd->program,
511 		    "kvm_write not implemented for dead kernels");
512 		return (-1);
513 	}
514 	/* NOTREACHED */
515 }
516