xref: /dragonfly/sys/kern/sys_process.c (revision b608d1d3)
1 /*
2  * Copyright (c) 1994, Sean Eric Fagan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Sean Eric Fagan.
16  * 4. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
32  */
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/sysproto.h>
37 #include <sys/proc.h>
38 #include <sys/priv.h>
39 #include <sys/vnode.h>
40 #include <sys/ptrace.h>
41 #include <sys/reg.h>
42 #include <sys/lock.h>
43 
44 #include <vm/vm.h>
45 #include <vm/pmap.h>
46 #include <vm/vm_map.h>
47 #include <vm/vm_page.h>
48 
49 #include <vfs/procfs/procfs.h>
50 
51 #include <sys/thread2.h>
52 #include <sys/spinlock2.h>
53 
54 /* unused -- the equivalent procfs code (procfs_domem) is used instead */
55 #if 0
56 static int
57 pread (struct proc *procp, unsigned int addr, unsigned int *retval)
58 {
59 	int		rv;
60 	vm_map_t	map, tmap;
61 	vm_object_t	object;
62 	vm_map_backing_t ba;
63 	vm_offset_t	kva = 0;
64 	int		page_offset;	/* offset into page */
65 	vm_offset_t	pageno;		/* page number */
66 	vm_map_entry_t	out_entry;
67 	vm_prot_t	out_prot;
68 	int		wflags;
69 	vm_pindex_t	pindex;
70 
71 	/* Map page into kernel space */
72 
73 	map = &procp->p_vmspace->vm_map;
74 
75 	page_offset = addr - trunc_page(addr);
76 	pageno = trunc_page(addr);
77 
78 	tmap = map;
79 	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
80 			   &ba, &pindex, &out_prot, &wflags);
81 	if (ba)
82 		object = ba->object;
83 	else
84 		object = NULL;
85 
86 
87 	if (rv != KERN_SUCCESS)
88 		return EINVAL;
89 
90 	vm_map_lookup_done (tmap, out_entry, 0);
91 
92 	/* Find space in kernel_map for the page we're interested in */
93 	rv = vm_map_find (&kernel_map, object, NULL,
94 			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
95 			  PAGE_SIZE, FALSE,
96 			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
97 			  VM_PROT_ALL, VM_PROT_ALL, 0);
98 
99 	if (!rv) {
100 		vm_object_reference (object);	/* XXX */
101 
102 		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
103 		if (!rv) {
104 			*retval = 0;
105 			bcopy ((caddr_t)kva + page_offset,
106 			       retval, sizeof *retval);
107 		}
108 		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
109 	}
110 
111 	return rv;
112 }
113 
114 static int
115 pwrite (struct proc *procp, unsigned int addr, unsigned int datum)
116 {
117 	int		rv;
118 	vm_map_t	map, tmap;
119 	vm_object_t	object;
120 	vm_map_backing_t ba;
121 	vm_offset_t	kva = 0;
122 	int		page_offset;	/* offset into page */
123 	vm_offset_t	pageno;		/* page number */
124 	vm_map_entry_t	out_entry;
125 	vm_prot_t	out_prot;
126 	int		wflags;
127 	vm_pindex_t	pindex;
128 	boolean_t	fix_prot = 0;
129 
130 	/* Map page into kernel space */
131 
132 	map = &procp->p_vmspace->vm_map;
133 
134 	page_offset = addr - trunc_page(addr);
135 	pageno = trunc_page(addr);
136 
137 	/*
138 	 * Check the permissions for the area we're interested in.
139 	 */
140 
141 	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
142 				     VM_PROT_WRITE, FALSE) == FALSE) {
143 		/*
144 		 * If the page was not writable, we make it so.
145 		 * XXX It is possible a page may *not* be read/executable,
146 		 * if a process changes that!
147 		 */
148 		fix_prot = 1;
149 		/* The page isn't writable, so let's try making it so... */
150 		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
151 			VM_PROT_ALL, 0)) != KERN_SUCCESS)
152 			return EFAULT;	/* I guess... */
153 	}
154 
155 	/*
156 	 * Now we need to get the page.  out_entry, out_prot, and wflags
157 	 * aren't used.  One would think the vm code would be
158 	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
159 	 * change the map argument.
160 	 */
161 
162 	tmap = map;
163 	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
164 			   &ba, &pindex, &out_prot, &wflags);
165 	if (ba)
166 		object = ba->object;
167 	else
168 		object = NULL;
169 
170 	if (rv != KERN_SUCCESS)
171 		return EINVAL;
172 
173 	/*
174 	 * Okay, we've got the page.  Let's release tmap.
175 	 */
176 	vm_map_lookup_done (tmap, out_entry, 0);
177 
178 	/*
179 	 * Fault the page in...
180 	 */
181 	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
182 	if (rv != KERN_SUCCESS)
183 		return EFAULT;
184 
185 	/* Find space in kernel_map for the page we're interested in */
186 	rv = vm_map_find (&kernel_map, object, NULL,
187 			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
188 			  PAGE_SIZE, FALSE,
189 			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
190 			  VM_PROT_ALL, VM_PROT_ALL, 0);
191 	if (!rv) {
192 		vm_object_reference (object);	/* XXX */
193 
194 		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
195 		if (!rv) {
196 			bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
197 		}
198 		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
199 	}
200 
201 	if (fix_prot)
202 		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
203 			VM_PROT_READ|VM_PROT_EXECUTE, 0);
204 	return rv;
205 }
206 #endif
207 
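/*
 * Illustrative only (not part of the original source): a minimal userland
 * sketch of how a debugger typically drives this syscall through the
 * ptrace(2) wrapper.  Error checking is omitted and "pid" is assumed to
 * name an existing, traceable process.
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <machine/reg.h>
 *
 *	struct reg regs;
 *	int status;
 *
 *	ptrace(PT_ATTACH, pid, NULL, 0);	     (target is sent SIGSTOP)
 *	waitpid(pid, &status, 0);		     (wait for it to stop)
 *	ptrace(PT_GETREGS, pid, (caddr_t)&regs, 0);  (fetch its registers)
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);	     (resume and detach)
 */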
208 /*
209  * Process debugging system call.
210  *
211  * MPALMOSTSAFE
212  */
213 int
214 sys_ptrace(struct ptrace_args *uap)
215 {
216 	struct proc *p = curproc;
217 
218 	/*
219 	 * XXX this obfuscation is to reduce stack usage, but the register
220 	 * structs may be too large to put on the stack anyway.
221 	 */
222 	union {
223 		struct ptrace_io_desc piod;
224 		struct dbreg dbreg;
225 		struct fpreg fpreg;
226 		struct reg reg;
227 	} r;
228 	void *addr;
229 	int error = 0;
230 
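	/*
	 * Requests that pass register state or a ptrace_io_desc are staged
	 * through the union above: copied in here and, for the "get"/PT_IO
	 * cases, copied back out after kern_ptrace() returns.  All other
	 * requests pass the user address through unchanged.
	 */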
231 	addr = &r;
232 	switch (uap->req) {
233 	case PT_GETREGS:
234 	case PT_GETFPREGS:
235 #ifdef PT_GETDBREGS
236 	case PT_GETDBREGS:
237 #endif
238 		break;
239 	case PT_SETREGS:
240 		error = copyin(uap->addr, &r.reg, sizeof r.reg);
241 		break;
242 	case PT_SETFPREGS:
243 		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
244 		break;
245 #ifdef PT_SETDBREGS
246 	case PT_SETDBREGS:
247 		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
248 		break;
249 #endif
250 	case PT_IO:
251 		error = copyin(uap->addr, &r.piod, sizeof r.piod);
252 		break;
253 	default:
254 		addr = uap->addr;
255 	}
256 	if (error)
257 		return (error);
258 
259 	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
260 			&uap->sysmsg_result);
261 	if (error)
262 		return (error);
263 
264 	switch (uap->req) {
265 	case PT_IO:
266 		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
267 		break;
268 	case PT_GETREGS:
269 		error = copyout(&r.reg, uap->addr, sizeof r.reg);
270 		break;
271 	case PT_GETFPREGS:
272 		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
273 		break;
274 #ifdef PT_GETDBREGS
275 	case PT_GETDBREGS:
276 		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
277 		break;
278 #endif
279 	}
280 
281 	return (error);
282 }
283 
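/*
 * Backend for sys_ptrace().  For register and PT_IO requests, addr points
 * to a kernel copy of the caller's buffer (see sys_ptrace() above); for
 * all other requests it is the raw address argument from userland.  The
 * word read by PT_READ_I/PT_READ_D is returned through *res.
 */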
284 int
285 kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
286 	    int data, int *res)
287 {
288 	struct proc *p, *pp;
289 	struct lwp *lp;
290 	struct iovec iov;
291 	struct uio uio;
292 	struct ptrace_io_desc *piod;
293 	int error = 0;
294 	int write, tmp;
295 	int t;
296 
297 	write = 0;
298 	if (req == PT_TRACE_ME) {
299 		p = curp;
300 		PHOLD(p);
301 	} else {
302 		if ((p = pfind(pid)) == NULL)
303 			return ESRCH;
304 	}
305 	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
306 		PRELE(p);
307 		return (ESRCH);
308 	}
309 	if (p->p_flags & P_SYSTEM) {
310 		PRELE(p);
311 		return EINVAL;
312 	}
313 
314 	lwkt_gettoken(&p->p_token);
315 	/* Can't trace a process that's currently exec'ing. */
316 	if ((p->p_flags & P_INEXEC) != 0) {
317 		lwkt_reltoken(&p->p_token);
318 		PRELE(p);
319 		return EAGAIN;
320 	}
321 
322 	/*
323 	 * Permissions check
324 	 */
325 	switch (req) {
326 	case PT_TRACE_ME:
327 		/* Always legal. */
328 		break;
329 
330 	case PT_ATTACH:
331 		/* Self */
332 		if (p->p_pid == curp->p_pid) {
333 			lwkt_reltoken(&p->p_token);
334 			PRELE(p);
335 			return EINVAL;
336 		}
337 
338 		/* Already traced */
339 		if (p->p_flags & P_TRACED) {
340 			lwkt_reltoken(&p->p_token);
341 			PRELE(p);
342 			return EBUSY;
343 		}
344 
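		/* a traced process may not attach to any of its own ancestors */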
345 		if (curp->p_flags & P_TRACED)
346 			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
347 				if (pp == p) {
348 					lwkt_reltoken(&p->p_token);
349 					PRELE(p);
350 					return (EINVAL);
351 				}
352 
353 		/* not owned by you, or has done setuid (unless you're root) */
354 		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
355 		     (p->p_flags & P_SUGID)) {
356 			if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
357 				lwkt_reltoken(&p->p_token);
358 				PRELE(p);
359 				return error;
360 			}
361 		}
362 
363 		/* can't trace init when securelevel > 0 */
364 		if (securelevel > 0 && p->p_pid == 1) {
365 			lwkt_reltoken(&p->p_token);
366 			PRELE(p);
367 			return EPERM;
368 		}
369 
370 		/* OK */
371 		break;
372 
373 	case PT_READ_I:
374 	case PT_READ_D:
375 	case PT_WRITE_I:
376 	case PT_WRITE_D:
377 	case PT_IO:
378 	case PT_CONTINUE:
379 	case PT_KILL:
380 	case PT_STEP:
381 	case PT_DETACH:
382 #ifdef PT_GETREGS
383 	case PT_GETREGS:
384 #endif
385 #ifdef PT_SETREGS
386 	case PT_SETREGS:
387 #endif
388 #ifdef PT_GETFPREGS
389 	case PT_GETFPREGS:
390 #endif
391 #ifdef PT_SETFPREGS
392 	case PT_SETFPREGS:
393 #endif
394 #ifdef PT_GETDBREGS
395 	case PT_GETDBREGS:
396 #endif
397 #ifdef PT_SETDBREGS
398 	case PT_SETDBREGS:
399 #endif
400 		/* not being traced... */
401 		if ((p->p_flags & P_TRACED) == 0) {
402 			lwkt_reltoken(&p->p_token);
403 			PRELE(p);
404 			return EPERM;
405 		}
406 
407 		/* not being traced by YOU */
408 		if (p->p_pptr != curp) {
409 			lwkt_reltoken(&p->p_token);
410 			PRELE(p);
411 			return EBUSY;
412 		}
413 
414 		/* not currently stopped */
415 		if (p->p_stat != SSTOP ||
416 		    (p->p_flags & P_WAITED) == 0) {
417 			lwkt_reltoken(&p->p_token);
418 			PRELE(p);
419 			return EBUSY;
420 		}
421 
422 		/* OK */
423 		break;
424 
425 	default:
426 		lwkt_reltoken(&p->p_token);
427 		PRELE(p);
428 		return EINVAL;
429 	}
430 
431 	/* XXX lwp */
432 	lp = FIRST_LWP_IN_PROC(p);
433 #ifdef FIX_SSTEP
434 	/*
435 	 * Single step fixup a la procfs
436 	 */
437 	FIX_SSTEP(lp);
438 #endif
439 
440 	/*
441 	 * Actually do the requests
442 	 */
443 
444 	*res = 0;
445 
446 	switch (req) {
447 	case PT_TRACE_ME:
448 		/* set my trace flag and "owner" so it can read/write me */
449 		p->p_flags |= P_TRACED;
450 		p->p_oppid = p->p_pptr->p_pid;
451 		lwkt_reltoken(&p->p_token);
452 		PRELE(p);
453 		return 0;
454 
455 	case PT_ATTACH:
456 		/* security check done above */
457 		p->p_flags |= P_TRACED;
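		/* remember the original parent so PT_DETACH can restore it */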
458 		p->p_oppid = p->p_pptr->p_pid;
459 		proc_reparent(p, curp);
460 		data = SIGSTOP;
461 		goto sendsig;	/* in PT_CONTINUE below */
462 
463 	case PT_STEP:
464 	case PT_CONTINUE:
465 	case PT_DETACH:
466 		/* Zero means do not send any signal */
467 		if (data < 0 || data > _SIG_MAXSIG) {
468 			lwkt_reltoken(&p->p_token);
469 			PRELE(p);
470 			return EINVAL;
471 		}
472 
473 		LWPHOLD(lp);
474 
475 		if (req == PT_STEP) {
476 			if ((error = ptrace_single_step (lp))) {
477 				LWPRELE(lp);
478 				lwkt_reltoken(&p->p_token);
479 				PRELE(p);
480 				return error;
481 			}
482 		}
483 
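		/* an addr of (void *)1 means "resume at the current pc" */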
484 		if (addr != (void *)1) {
485 			if ((error = ptrace_set_pc (lp,
486 			    (u_long)(uintfptr_t)addr))) {
487 				LWPRELE(lp);
488 				lwkt_reltoken(&p->p_token);
489 				PRELE(p);
490 				return error;
491 			}
492 		}
493 		LWPRELE(lp);
494 
495 		if (req == PT_DETACH) {
496 			/* reset process parent */
497 			if (p->p_oppid != p->p_pptr->p_pid) {
498 				struct proc *pp;
499 
500 				pp = pfind(p->p_oppid);
501 				if (pp) {
502 					proc_reparent(p, pp);
503 					PRELE(pp);
504 				}
505 			}
506 
507 			p->p_flags &= ~(P_TRACED | P_WAITED);
508 			p->p_oppid = 0;
509 
510 			/* should we send SIGCHLD? */
511 		}
512 
513 	sendsig:
514 		/*
515 		 * Deliver or queue signal.  If the process is stopped
516 		 * force it to be SACTIVE again.
517 		 */
518 		crit_enter();
519 		if (p->p_stat == SSTOP) {
520 			p->p_xstat = data;
521 			proc_unstop(p, SSTOP);
522 		} else if (data) {
523 			ksignal(p, data);
524 		}
525 		crit_exit();
526 		lwkt_reltoken(&p->p_token);
527 		PRELE(p);
528 		return 0;
529 
530 	case PT_WRITE_I:
531 	case PT_WRITE_D:
532 		write = 1;
533 		/* fallthrough */
534 	case PT_READ_I:
535 	case PT_READ_D:
536 		/*
537 		 * NOTE! uio_offset represents the offset in the target
538 		 * process.  The iov is in the current process (the guy
539 		 * making the ptrace call) so uio_td must be the current
540 		 * process (though for a SYSSPACE transfer it doesn't
541 		 * really matter).
542 		 */
543 		tmp = 0;
544 		/* write = 0 set above */
545 		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
546 		iov.iov_len = sizeof(int);
547 		uio.uio_iov = &iov;
548 		uio.uio_iovcnt = 1;
549 		uio.uio_offset = (off_t)(uintptr_t)addr;
550 		uio.uio_resid = sizeof(int);
551 		uio.uio_segflg = UIO_SYSSPACE;
552 		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
553 		uio.uio_td = curthread;
554 		error = procfs_domem(curp, lp, NULL, &uio);
555 		if (uio.uio_resid != 0) {
556 			/*
557 			 * XXX procfs_domem() doesn't currently return ENOSPC,
558 			 * so I think write() can bogusly return 0.
559 			 * XXX what happens for short writes?  We don't want
560 			 * to write partial data.
561 			 * XXX procfs_domem() returns EPERM for other invalid
562 			 * addresses.  Convert this to EINVAL.  Does this
563 			 * clobber returns of EPERM for other reasons?
564 			 */
565 			if (error == 0 || error == ENOSPC || error == EPERM)
566 				error = EINVAL;	/* EOF */
567 		}
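		/* for reads, hand the fetched word back via *res */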
568 		if (!write)
569 			*res = tmp;
570 		lwkt_reltoken(&p->p_token);
571 		PRELE(p);
572 		return (error);
573 
574 	case PT_IO:
575 		/*
576 		 * NOTE! uio_offset represents the offset in the target
577 		 * process.  The iov is in the current process (the guy
578 		 * making the ptrace call) so uio_td must be the current
579 		 * process.
580 		 */
581 		piod = addr;
582 		iov.iov_base = piod->piod_addr;
583 		iov.iov_len = piod->piod_len;
584 		uio.uio_iov = &iov;
585 		uio.uio_iovcnt = 1;
586 		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
587 		uio.uio_resid = piod->piod_len;
588 		uio.uio_segflg = UIO_USERSPACE;
589 		uio.uio_td = curthread;
590 		switch (piod->piod_op) {
591 		case PIOD_READ_D:
592 		case PIOD_READ_I:
593 			uio.uio_rw = UIO_READ;
594 			break;
595 		case PIOD_WRITE_D:
596 		case PIOD_WRITE_I:
597 			uio.uio_rw = UIO_WRITE;
598 			break;
599 		default:
600 			lwkt_reltoken(&p->p_token);
601 			PRELE(p);
602 			return (EINVAL);
603 		}
604 		error = procfs_domem(curp, lp, NULL, &uio);
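		/* report the number of bytes actually transferred */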
605 		piod->piod_len -= uio.uio_resid;
606 		lwkt_reltoken(&p->p_token);
607 		PRELE(p);
608 		return (error);
609 
610 	case PT_KILL:
611 		data = SIGKILL;
612 		goto sendsig;	/* in PT_CONTINUE above */
613 
614 #ifdef PT_SETREGS
615 	case PT_SETREGS:
616 		write = 1;
617 		/* fallthrough */
618 #endif /* PT_SETREGS */
619 #ifdef PT_GETREGS
620 	case PT_GETREGS:
621 		/* write = 0 above */
622 #endif /* PT_GETREGS */
623 #if defined(PT_SETREGS) || defined(PT_GETREGS)
624 		if (!procfs_validregs(lp)) {
625 			lwkt_reltoken(&p->p_token);
626 			PRELE(p);
627 			return EINVAL;
628 		} else {
629 			iov.iov_base = addr;
630 			iov.iov_len = sizeof(struct reg);
631 			uio.uio_iov = &iov;
632 			uio.uio_iovcnt = 1;
633 			uio.uio_offset = 0;
634 			uio.uio_resid = sizeof(struct reg);
635 			uio.uio_segflg = UIO_SYSSPACE;
636 			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
637 			uio.uio_td = curthread;
638 			t = procfs_doregs(curp, lp, NULL, &uio);
639 			lwkt_reltoken(&p->p_token);
640 			PRELE(p);
641 			return t;
642 		}
643 #endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */
644 
645 #ifdef PT_SETFPREGS
646 	case PT_SETFPREGS:
647 		write = 1;
648 		/* fallthrough */
649 #endif /* PT_SETFPREGS */
650 #ifdef PT_GETFPREGS
651 	case PT_GETFPREGS:
652 		/* write = 0 above */
653 #endif /* PT_GETFPREGS */
654 #if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
655 		if (!procfs_validfpregs(lp)) {
656 			lwkt_reltoken(&p->p_token);
657 			PRELE(p);
658 			return EINVAL;
659 		} else {
660 			iov.iov_base = addr;
661 			iov.iov_len = sizeof(struct fpreg);
662 			uio.uio_iov = &iov;
663 			uio.uio_iovcnt = 1;
664 			uio.uio_offset = 0;
665 			uio.uio_resid = sizeof(struct fpreg);
666 			uio.uio_segflg = UIO_SYSSPACE;
667 			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
668 			uio.uio_td = curthread;
669 			t = procfs_dofpregs(curp, lp, NULL, &uio);
670 			lwkt_reltoken(&p->p_token);
671 			PRELE(p);
672 			return t;
673 		}
674 #endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */
675 
676 #ifdef PT_SETDBREGS
677 	case PT_SETDBREGS:
678 		write = 1;
679 		/* fallthrough */
680 #endif /* PT_SETDBREGS */
681 #ifdef PT_GETDBREGS
682 	case PT_GETDBREGS:
683 		/* write = 0 above */
684 #endif /* PT_GETDBREGS */
685 #if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
686 		if (!procfs_validdbregs(lp)) {
687 			lwkt_reltoken(&p->p_token);
688 			PRELE(p);
689 			return EINVAL;
690 		} else {
691 			iov.iov_base = addr;
692 			iov.iov_len = sizeof(struct dbreg);
693 			uio.uio_iov = &iov;
694 			uio.uio_iovcnt = 1;
695 			uio.uio_offset = 0;
696 			uio.uio_resid = sizeof(struct dbreg);
697 			uio.uio_segflg = UIO_SYSSPACE;
698 			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
699 			uio.uio_td = curthread;
700 			t = procfs_dodbregs(curp, lp, NULL, &uio);
701 			lwkt_reltoken(&p->p_token);
702 			PRELE(p);
703 			return t;
704 		}
705 #endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */
706 
707 	default:
708 		break;
709 	}
710 
711 	lwkt_reltoken(&p->p_token);
712 	PRELE(p);
713 
714 	return 0;
715 }
716 
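/*
 * trace_req() - currently always returns 1 (tracing allowed).
 */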
717 int
718 trace_req(struct proc *p)
719 {
720 	return 1;
721 }
722 
723 /*
724  * stopevent()
725  *
726  * Stop a process because of a procfs event.  Stay stopped until p->p_step
727  * is cleared (cleared by PIOCCONT in procfs).
728  *
729  * MPSAFE
730  */
731 void
732 stopevent(struct proc *p, unsigned int event, unsigned int val)
733 {
734 	/*
735 	 * Set event info.  Recheck p_stops in case we are
736 	 * racing a close() on procfs.
737 	 */
738 	spin_lock(&p->p_spin);
739 	if ((p->p_stops & event) == 0) {
740 		spin_unlock(&p->p_spin);
741 		return;
742 	}
743 	p->p_xstat = val;
744 	p->p_stype = event;
745 	p->p_step = 1;
746 	tsleep_interlock(&p->p_step, 0);
747 	spin_unlock(&p->p_spin);
748 
749 	/*
750 	 * Wakeup any PIOCWAITing procs and wait for p_step to
751 	 * be cleared.
752 	 */
753 	for (;;) {
754 		wakeup(&p->p_stype);
755 		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
756 		spin_lock(&p->p_spin);
757 		if (p->p_step == 0) {
758 			spin_unlock(&p->p_spin);
759 			break;
760 		}
761 		tsleep_interlock(&p->p_step, 0);
762 		spin_unlock(&p->p_spin);
763 	}
764 }
765 
766