xref: /original-bsd/lib/libkvm/kvm_proc.c (revision b6592f3d)
1 /*-
2  * Copyright (c) 1989, 1992, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software developed by the Computer Systems
6  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
7  * BG 91-66 and contributed to Berkeley.
8  *
9  * %sccs.include.redist.c%
10  */
11 
12 #if defined(LIBC_SCCS) && !defined(lint)
13 static char sccsid[] = "@(#)kvm_proc.c	8.4 (Berkeley) 08/20/94";
14 #endif /* LIBC_SCCS and not lint */
15 
16 /*
17  * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
18  * users of this code, so we've factored it out into a separate module.
19  * Thus, we keep this grunge out of the other kvm applications (i.e.,
20  * most other applications are interested only in open/close/read/nlist).
21  */
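
/*
 * Illustrative usage sketch (not compiled): roughly how a ps-like client
 * might drive the entry points defined in this file.  The header list and
 * error handling are abbreviated, and KERN_PROC_ALL is just one of the
 * selectors accepted by kvm_getprocs().
 */
#if 0
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <err.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <kvm.h>

int
main()
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_proc *kp;
	kvm_t *kd;
	int i, nprocs;

	/* NULL file names mean the running kernel and /dev/mem. */
	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == 0)
		errx(1, "%s", errbuf);
	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nprocs);
	if (kp == 0)
		errx(1, "%s", kvm_geterr(kd));
	for (i = 0; i < nprocs; ++i, ++kp)
		printf("%5d %s\n", kp->kp_proc.p_pid, kp->kp_proc.p_comm);
	(void)kvm_close(kd);
	return (0);
}
#endif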
22 
23 #include <sys/param.h>
24 #include <sys/user.h>
25 #include <sys/proc.h>
26 #include <sys/exec.h>
27 #include <sys/stat.h>
28 #include <sys/ioctl.h>
29 #include <sys/tty.h>
30 #include <unistd.h>
31 #include <nlist.h>
32 #include <kvm.h>
33 
34 #include <vm/vm.h>
35 #include <vm/vm_param.h>
36 #include <vm/swap_pager.h>
37 
38 #include <sys/sysctl.h>
39 
40 #include <limits.h>
41 #include <db.h>
42 #include <paths.h>
43 
44 #include "kvm_private.h"
45 
46 static char *
47 kvm_readswap(kd, p, va, cnt)
48 	kvm_t *kd;
49 	const struct proc *p;
50 	u_long va;
51 	u_long *cnt;
52 {
53 	register int ix;
54 	register u_long addr, head;
55 	register u_long offset, pagestart, sbstart, pgoff;
56 	register off_t seekpoint;
57 	struct vm_map_entry vme;
58 	struct vm_object vmo;
59 	struct pager_struct pager;
60 	struct swpager swap;
61 	struct swblock swb;
62 	static char page[NBPG];
63 
64 	head = (u_long)&p->p_vmspace->vm_map.header;
65 	/*
66 	 * Look through the address map for the memory object
67 	 * that corresponds to the given virtual address.
68 	 * The header just has the entire valid range.
69 	 */
70 	addr = head;
71 	while (1) {
72 		if (kvm_read(kd, addr, (char *)&vme, sizeof(vme)) !=
73 		    sizeof(vme))
74 			return (0);
75 
76 		if (va >= vme.start && va <= vme.end &&
77 		    vme.object.vm_object != 0)
78 			break;
79 
80 		addr = (u_long)vme.next;
81 		if (addr == 0 || addr == head)
82 			return (0);
83 	}
84 	/*
85 	 * We found the right object -- follow shadow links.
86 	 */
87 	offset = va - vme.start + vme.offset;
88 	addr = (u_long)vme.object.vm_object;
89 	while (1) {
90 		if (kvm_read(kd, addr, (char *)&vmo, sizeof(vmo)) !=
91 		    sizeof(vmo))
92 			return (0);
93 		addr = (u_long)vmo.shadow;
94 		if (addr == 0)
95 			break;
96 		offset += vmo.shadow_offset;
97 	}
98 	if (vmo.pager == 0)
99 		return (0);
100 
101 	offset += vmo.paging_offset;
102 	/*
103 	 * Read in the pager info and make sure it's a swap device.
104 	 */
105 	addr = (u_long)vmo.pager;
106 	if (kvm_read(kd, addr, (char *)&pager, sizeof(pager)) != sizeof(pager)
107 	    || pager.pg_type != PG_SWAP)
108 		return (0);
109 
110 	/*
111 	 * Read in the swap_pager private data, and compute the
112 	 * swap offset.
113 	 */
114 	addr = (u_long)pager.pg_data;
115 	if (kvm_read(kd, addr, (char *)&swap, sizeof(swap)) != sizeof(swap))
116 		return (0);
117 	ix = offset / dbtob(swap.sw_bsize);
118 	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
119 		return (0);
120 
121 	addr = (u_long)&swap.sw_blocks[ix];
122 	if (kvm_read(kd, addr, (char *)&swb, sizeof(swb)) != sizeof(swb))
123 		return (0);
124 
125 	sbstart = (offset / dbtob(swap.sw_bsize)) * dbtob(swap.sw_bsize);
126 	sbstart /= NBPG;
127 	pagestart = offset / NBPG;
128 	pgoff = pagestart - sbstart;
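	/*
	 * For example, assuming NBPG is 4096 and sw_bsize is 32 (16K swap
	 * chunks): an offset of 0x9000 gives ix = 2, sbstart = 8,
	 * pagestart = 9 and pgoff = 1, so the page is present only if
	 * bit 1 of swb_mask is set, and its data lies one page past
	 * dbtob(swb_block) on the swap device.
	 */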
129 
130 	if (swb.swb_block == 0 || (swb.swb_mask & (1 << pgoff)) == 0)
131 		return (0);
132 
133 	seekpoint = dbtob(swb.swb_block) + ctob(pgoff);
134 	errno = 0;
135 	if (lseek(kd->swfd, seekpoint, 0) == -1 && errno != 0)
136 		return (0);
137 	if (read(kd->swfd, page, sizeof(page)) != sizeof(page))
138 		return (0);
139 
140 	offset %= NBPG;
141 	*cnt = NBPG - offset;
142 	return (&page[offset]);
143 }
144 
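/*
 * Note that KREAD evaluates to non-zero when the kernel read FAILS (i.e.,
 * on a short read), so "if (KREAD(...))" below takes the error path and
 * "if (KREAD(...) == 0)" continues only on success.
 */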
145 #define KREAD(kd, addr, obj) \
146 	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
147 
148 /*
149  * Read procs from the memory file into buffer bp, which has space to hold
150  * at most maxcnt procs.
151  */
152 static int
153 kvm_proclist(kd, what, arg, p, bp, maxcnt)
154 	kvm_t *kd;
155 	int what, arg;
156 	struct proc *p;
157 	struct kinfo_proc *bp;
158 	int maxcnt;
159 {
160 	register int cnt = 0;
161 	struct eproc eproc;
162 	struct pgrp pgrp;
163 	struct session sess;
164 	struct tty tty;
165 	struct proc proc;
166 
167 	for (; cnt < maxcnt && p != 0; p = proc.p_list.le_next) {
168 		if (KREAD(kd, (u_long)p, &proc)) {
169 			_kvm_err(kd, kd->program, "can't read proc at %x", p);
170 			return (-1);
171 		}
172 		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
173 			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
174 			      &eproc.e_ucred);
175 
176 		switch(what) {
177 
178 		case KERN_PROC_PID:
179 			if (proc.p_pid != (pid_t)arg)
180 				continue;
181 			break;
182 
183 		case KERN_PROC_UID:
184 			if (eproc.e_ucred.cr_uid != (uid_t)arg)
185 				continue;
186 			break;
187 
188 		case KERN_PROC_RUID:
189 			if (eproc.e_pcred.p_ruid != (uid_t)arg)
190 				continue;
191 			break;
192 		}
193 		/*
194 		 * We're going to add another proc to the set.  If this
195 		 * will overflow the buffer, assume that nprocs (or the
196 		 * proc list) is corrupt and declare an error.
197 		 */
198 		if (cnt >= maxcnt) {
199 			_kvm_err(kd, kd->program, "nprocs corrupt");
200 			return (-1);
201 		}
202 		/*
203 		 * gather eproc
204 		 */
205 		eproc.e_paddr = p;
206 		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
207 			_kvm_err(kd, kd->program, "can't read pgrp at %x",
208 				 proc.p_pgrp);
209 			return (-1);
210 		}
211 		eproc.e_sess = pgrp.pg_session;
212 		eproc.e_pgid = pgrp.pg_id;
213 		eproc.e_jobc = pgrp.pg_jobc;
214 		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
215 			_kvm_err(kd, kd->program, "can't read session at %x",
216 				pgrp.pg_session);
217 			return (-1);
218 		}
219 		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
220 			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
221 				_kvm_err(kd, kd->program,
222 					 "can't read tty at %x", sess.s_ttyp);
223 				return (-1);
224 			}
225 			eproc.e_tdev = tty.t_dev;
226 			eproc.e_tsess = tty.t_session;
227 			if (tty.t_pgrp != NULL) {
228 				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
229 					_kvm_err(kd, kd->program,
230 						 "can't read tpgrp at %x",
231 						tty.t_pgrp);
232 					return (-1);
233 				}
234 				eproc.e_tpgid = pgrp.pg_id;
235 			} else
236 				eproc.e_tpgid = -1;
237 		} else
238 			eproc.e_tdev = NODEV;
239 		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
240 		if (sess.s_leader == p)
241 			eproc.e_flag |= EPROC_SLEADER;
242 		if (proc.p_wmesg)
243 			(void)kvm_read(kd, (u_long)proc.p_wmesg,
244 			    eproc.e_wmesg, WMESGLEN);
245 
246 #ifdef sparc
247 		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_rssize,
248 		    (char *)&eproc.e_vm.vm_rssize,
249 		    sizeof(eproc.e_vm.vm_rssize));
250 		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_tsize,
251 		    (char *)&eproc.e_vm.vm_tsize,
252 		    3 * sizeof(eproc.e_vm.vm_rssize));	/* XXX */
253 #else
254 		(void)kvm_read(kd, (u_long)proc.p_vmspace,
255 		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));
256 #endif
257 		eproc.e_xsize = eproc.e_xrssize = 0;
258 		eproc.e_xccount = eproc.e_xswrss = 0;
259 
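		/*
		 * The pgrp and tty selectors can only be applied now,
		 * after the eproc fields they test (e_pgid, e_tdev)
		 * have been filled in above.
		 */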
260 		switch (what) {
261 
262 		case KERN_PROC_PGRP:
263 			if (eproc.e_pgid != (pid_t)arg)
264 				continue;
265 			break;
266 
267 		case KERN_PROC_TTY:
268 			if ((proc.p_flag & P_CONTROLT) == 0 ||
269 			     eproc.e_tdev != (dev_t)arg)
270 				continue;
271 			break;
272 		}
273 		bcopy(&proc, &bp->kp_proc, sizeof(proc));
274 		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
275 		++bp;
276 		++cnt;
277 	}
278 	return (cnt);
279 }
280 
281 /*
282  * Build proc info array by reading in proc list from a crash dump.
283  * Return number of procs read.  maxcnt is the max we will read.
284  */
285 static int
286 kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
287 	kvm_t *kd;
288 	int what, arg;
289 	u_long a_allproc;
290 	u_long a_zombproc;
291 	int maxcnt;
292 {
293 	register struct kinfo_proc *bp = kd->procbase;
294 	register int acnt, zcnt;
295 	struct proc *p;
296 
297 	if (KREAD(kd, a_allproc, &p)) {
298 		_kvm_err(kd, kd->program, "cannot read allproc");
299 		return (-1);
300 	}
301 	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
302 	if (acnt < 0)
303 		return (acnt);
304 
305 	if (KREAD(kd, a_zombproc, &p)) {
306 		_kvm_err(kd, kd->program, "cannot read zombproc");
307 		return (-1);
308 	}
309 	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
310 	if (zcnt < 0)
311 		zcnt = 0;
312 
313 	return (acnt + zcnt);
314 }
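
/*
 * Illustrative sketch (not compiled) of the post-mortem case handled by
 * kvm_deadprocs(): the caller opens a saved kernel image and core dump
 * instead of taking the live-kernel defaults.  The file names below are
 * examples only; the headers match the sketch near the top of this file.
 */
#if 0
static void
list_dump_procs()
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_proc *kp;
	kvm_t *kd;
	int i, nprocs;

	kd = kvm_openfiles("/vmunix", "/var/crash/vmcore.0", 0,
			   O_RDONLY, errbuf);
	if (kd == 0)
		return;
	/* Not ISALIVE, so kvm_getprocs() ends up in kvm_deadprocs(). */
	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nprocs);
	for (i = 0; kp != 0 && i < nprocs; ++i, ++kp)
		printf("%5d %s\n", kp->kp_proc.p_pid, kp->kp_proc.p_comm);
	(void)kvm_close(kd);
}
#endif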
315 
316 struct kinfo_proc *
317 kvm_getprocs(kd, op, arg, cnt)
318 	kvm_t *kd;
319 	int op, arg;
320 	int *cnt;
321 {
322 	int mib[4], size, st, nprocs;
323 
324 	if (kd->procbase != 0) {
325 		free((void *)kd->procbase);
326 		/*
327 		 * Clear this pointer in case this call fails.  Otherwise,
328 		 * kvm_close() will free it again.
329 		 */
330 		kd->procbase = 0;
331 	}
332 	if (ISALIVE(kd)) {
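		/*
		 * Live kernel: use the usual two-step sysctl protocol --
		 * first ask how much space the kinfo_proc array needs,
		 * then fetch it into a buffer of that size.
		 */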
333 		size = 0;
334 		mib[0] = CTL_KERN;
335 		mib[1] = KERN_PROC;
336 		mib[2] = op;
337 		mib[3] = arg;
338 		st = sysctl(mib, 4, NULL, &size, NULL, 0);
339 		if (st == -1) {
340 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
341 			return (0);
342 		}
343 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
344 		if (kd->procbase == 0)
345 			return (0);
346 		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
347 		if (st == -1) {
348 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
349 			return (0);
350 		}
351 		if (size % sizeof(struct kinfo_proc) != 0) {
352 			_kvm_err(kd, kd->program,
353 				"proc size mismatch (%d total, %d chunks)",
354 				size, sizeof(struct kinfo_proc));
355 			return (0);
356 		}
357 		nprocs = size / sizeof(struct kinfo_proc);
358 	} else {
359 		struct nlist nl[4], *p;
360 
361 		nl[0].n_name = "_nprocs";
362 		nl[1].n_name = "_allproc";
363 		nl[2].n_name = "_zombproc";
364 		nl[3].n_name = 0;
365 
366 		if (kvm_nlist(kd, nl) != 0) {
367 			for (p = nl; p->n_type != 0; ++p)
368 				;
369 			_kvm_err(kd, kd->program,
370 				 "%s: no such symbol", p->n_name);
371 			return (0);
372 		}
373 		if (KREAD(kd, nl[0].n_value, &nprocs)) {
374 			_kvm_err(kd, kd->program, "can't read nprocs");
375 			return (0);
376 		}
377 		size = nprocs * sizeof(struct kinfo_proc);
378 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
379 		if (kd->procbase == 0)
380 			return (0);
381 
382 		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
383 				      nl[2].n_value, nprocs);
384 #ifdef notdef
385 		size = nprocs * sizeof(struct kinfo_proc);
386 		(void)realloc(kd->procbase, size);
387 #endif
388 	}
389 	*cnt = nprocs;
390 	return (kd->procbase);
391 }
392 
393 void
394 _kvm_freeprocs(kd)
395 	kvm_t *kd;
396 {
397 	if (kd->procbase) {
398 		free(kd->procbase);
399 		kd->procbase = 0;
400 	}
401 }
402 
403 void *
404 _kvm_realloc(kd, p, n)
405 	kvm_t *kd;
406 	void *p;
407 	size_t n;
408 {
409 	void *np = (void *)realloc(p, n);
410 
411 	if (np == 0)
412 		_kvm_err(kd, kd->program, "out of memory");
413 	return (np);
414 }
415 
416 #ifndef MAX
417 #define MAX(a, b) ((a) > (b) ? (a) : (b))
418 #endif
419 
420 /*
421  * Read in an argument vector from the user address space of process p.
422  * addr is the user-space base address of narg null-terminated contiguous
423  * strings.  This is used to read in both the command arguments and
424  * environment strings.  Read at most maxcnt characters of strings.
425  */
426 static char **
427 kvm_argv(kd, p, addr, narg, maxcnt)
428 	kvm_t *kd;
429 	struct proc *p;
430 	register u_long addr;
431 	register int narg;
432 	register int maxcnt;
433 {
434 	register char *cp;
435 	register int len, cc;
436 	register char **argv;
437 
438 	/*
439 	 * Check that there isn't an unreasonable number of arguments,
440 	 * and that the address is in user space.
441 	 */
442 	if (narg > 512 || addr < VM_MIN_ADDRESS || addr >= VM_MAXUSER_ADDRESS)
443 		return (0);
444 
445 	if (kd->argv == 0) {
446 		/*
447 		 * Try to avoid reallocs.
448 		 */
449 		kd->argc = MAX(narg + 1, 32);
450 		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
451 						sizeof(*kd->argv));
452 		if (kd->argv == 0)
453 			return (0);
454 	} else if (narg + 1 > kd->argc) {
455 		kd->argc = MAX(2 * kd->argc, narg + 1);
456 		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
457 						sizeof(*kd->argv));
458 		if (kd->argv == 0)
459 			return (0);
460 	}
461 	if (kd->argspc == 0) {
462 		kd->argspc = (char *)_kvm_malloc(kd, NBPG);
463 		if (kd->argspc == 0)
464 			return (0);
465 		kd->arglen = NBPG;
466 	}
467 	cp = kd->argspc;
468 	argv = kd->argv;
469 	*argv = cp;
470 	len = 0;
471 	/*
472 	 * Loop over pages, filling in the argument vector.
473 	 */
474 	while (addr < VM_MAXUSER_ADDRESS) {
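		/*
		 * Read no further than the end of the current page and no
		 * further than the caller's character limit, if any.
		 */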
475 		cc = NBPG - (addr & PGOFSET);
476 		if (maxcnt > 0 && cc > maxcnt - len)
477 			cc = maxcnt - len;
478 		if (len + cc > kd->arglen) {
479 			register int off;
480 			register char **pp;
481 			register char *op = kd->argspc;
482 
483 			kd->arglen *= 2;
484 			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
485 							  kd->arglen);
486 			if (kd->argspc == 0)
487 				return (0);
488 			cp = &kd->argspc[len];
489 			/*
490 			 * Adjust argv pointers in case realloc moved
491 			 * the string space.
492 			 */
493 			off = kd->argspc - op;
494 			for (pp = kd->argv; pp < argv; ++pp)
495 				*pp += off;
496 		}
497 		if (kvm_uread(kd, p, addr, cp, cc) != cc)
498 			/* XXX */
499 			return (0);
500 		len += cc;
501 		addr += cc;
502 
503 		if (maxcnt == 0 && len > 16 * NBPG)
504 			/* sanity */
505 			return (0);
506 
507 		while (--cc >= 0) {
508 			if (*cp++ == 0) {
509 				if (--narg <= 0) {
510 					*++argv = 0;
511 					return (kd->argv);
512 				} else
513 					*++argv = cp;
514 			}
515 		}
516 		if (maxcnt > 0 && len >= maxcnt) {
517 			/*
518 			 * We're stopping prematurely.  Terminate the
519 			 * argv and current string.
520 			 */
521 			*++argv = 0;
522 			*cp = 0;
523 			return (kd->argv);
524 		}
525 	}
	/* Ran off the end of user address space before finding all the strings. */
	return (0);
526 }
527 
528 static void
529 ps_str_a(p, addr, n)
530 	struct ps_strings *p;
531 	u_long *addr;
532 	int *n;
533 {
534 	*addr = (u_long)p->ps_argvstr;
535 	*n = p->ps_nargvstr;
536 }
537 
538 static void
539 ps_str_e(p, addr, n)
540 	struct ps_strings *p;
541 	u_long *addr;
542 	int *n;
543 {
544 	*addr = (u_long)p->ps_envstr;
545 	*n = p->ps_nenvstr;
546 }
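
/*
 * ps_str_a() and ps_str_e() simply select the argument or environment
 * half of the ps_strings block, so kvm_doargv() below can share one code
 * path between kvm_getargv() and kvm_getenvv().
 */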
547 
548 /*
549  * Determine if the proc indicated by p is still active.
550  * This test is not 100% foolproof in theory, but chances of
551  * being wrong are very low.
552  */
553 static int
554 proc_verify(kd, kernp, p)
555 	kvm_t *kd;
556 	u_long kernp;
557 	const struct proc *p;
558 {
559 	struct proc kernproc;
560 
561 	/*
562 	 * Just read in the whole proc.  It's not that big relative
563 	 * to the cost of the read system call.
564 	 */
565 	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
566 	    sizeof(kernproc))
567 		return (0);
568 	return (p->p_pid == kernproc.p_pid &&
569 		(kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
570 }
571 
572 static char **
573 kvm_doargv(kd, kp, nchr, info)
574 	kvm_t *kd;
575 	const struct kinfo_proc *kp;
576 	int nchr;
577 	int (*info)(struct ps_strings*, u_long *, int *);
578 {
579 	register const struct proc *p = &kp->kp_proc;
580 	register char **ap;
581 	u_long addr;
582 	int cnt;
583 	struct ps_strings arginfo;
584 
585 	/*
586 	 * Pointers are stored at the top of the user stack.
587 	 */
588 	if (p->p_stat == SZOMB ||
589 	    kvm_uread(kd, p, USRSTACK - sizeof(arginfo), (char *)&arginfo,
590 		      sizeof(arginfo)) != sizeof(arginfo))
591 		return (0);
592 
593 	(*info)(&arginfo, &addr, &cnt);
594 	ap = kvm_argv(kd, p, addr, cnt, nchr);
595 	/*
596 	 * For live kernels, make sure this process didn't go away.
597 	 */
598 	if (ap != 0 && ISALIVE(kd) &&
599 	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
600 		ap = 0;
601 	return (ap);
602 }
603 
604 /*
605  * Get the command args.  This code is now machine independent.
606  */
607 char **
608 kvm_getargv(kd, kp, nchr)
609 	kvm_t *kd;
610 	const struct kinfo_proc *kp;
611 	int nchr;
612 {
613 	return (kvm_doargv(kd, kp, nchr, ps_str_a));
614 }
615 
616 char **
617 kvm_getenvv(kd, kp, nchr)
618 	kvm_t *kd;
619 	const struct kinfo_proc *kp;
620 	int nchr;
621 {
622 	return (kvm_doargv(kd, kp, nchr, ps_str_e));
623 }
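
/*
 * Illustrative sketch (not compiled): dumping the argument and environment
 * vectors for each process.  An nchr of 0 imposes no caller limit (kvm_argv()
 * still applies a 16-page sanity cap); the returned vectors point into
 * storage owned by the kvm descriptor and are reused by the next call.
 */
#if 0
static void
show_args_and_env(kd)
	kvm_t *kd;
{
	struct kinfo_proc *kp;
	register char **ap;
	int i, nprocs;

	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nprocs);
	for (i = 0; kp != 0 && i < nprocs; ++i, ++kp) {
		printf("%d:", kp->kp_proc.p_pid);
		for (ap = kvm_getargv(kd, kp, 0); ap != 0 && *ap != 0; ++ap)
			printf(" %s", *ap);
		printf("\n");
		for (ap = kvm_getenvv(kd, kp, 0); ap != 0 && *ap != 0; ++ap)
			printf("\t%s\n", *ap);
	}
}
#endif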
624 
625 /*
626  * Read from user space.  The user context is given by p.
627  */
628 ssize_t
629 kvm_uread(kd, p, uva, buf, len)
630 	kvm_t *kd;
631 	register struct proc *p;
632 	register u_long uva;
633 	register char *buf;
634 	register size_t len;
635 {
636 	register char *cp;
637 
638 	cp = buf;
639 	while (len > 0) {
640 		u_long pa;
641 		register int cc;
642 
643 		cc = _kvm_uvatop(kd, p, uva, &pa);
644 		if (cc > 0) {
645 			if (cc > len)
646 				cc = len;
647 			errno = 0;
648 			if (lseek(kd->pmfd, (off_t)pa, 0) == -1 && errno != 0) {
649 				_kvm_err(kd, 0, "invalid address (%x)", uva);
650 				break;
651 			}
652 			cc = read(kd->pmfd, cp, cc);
653 			if (cc < 0) {
654 				_kvm_syserr(kd, 0, _PATH_MEM);
655 				break;
656 			} else if (cc < len) {
657 				_kvm_err(kd, kd->program, "short read");
658 				break;
659 			}
660 		} else if (ISALIVE(kd)) {
661 			/* try swap */
662 			register char *dp;
663 			u_long cnt;
664 
665 			dp = kvm_readswap(kd, p, uva, &cnt);
666 			if (dp == 0) {
667 				_kvm_err(kd, 0, "invalid address (%x)", uva);
668 				return (0);
669 			}
670 			cc = MIN(cnt, len);
671 			bcopy(dp, cp, cc);
672 		} else
673 			break;
674 		cp += cc;
675 		uva += cc;
676 		len -= cc;
677 	}
678 	return (ssize_t)(cp - buf);
679 }
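
/*
 * Illustrative sketch (not compiled): using kvm_uread() directly to pull a
 * process's ps_strings block off the top of its stack, much as kvm_doargv()
 * does above.  "kd" and "kp" are assumed to come from kvm_open*() and
 * kvm_getprocs().
 */
#if 0
	struct ps_strings pss;

	if (kvm_uread(kd, &kp->kp_proc, USRSTACK - sizeof(pss),
		      (char *)&pss, sizeof(pss)) == sizeof(pss))
		printf("%d argv strings at 0x%lx\n",
		       pss.ps_nargvstr, (u_long)pss.ps_argvstr);
#endif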
680