xref: /dragonfly/sys/kern/sys_process.c (revision dd491ed2)
/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
 * $DragonFly: src/sys/kern/sys_process.c,v 1.30 2007/02/19 01:14:23 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>
#include <vfs/procfs/procfs.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/* use the equivalent procfs code */
#if 0
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
			   &object, &pindex, &out_prot, &wflags);

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference(object);	/* XXX */

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;
	boolean_t	fix_prot = 0;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_ALL, 0)) != KERN_SUCCESS)
		  return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wflags, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
			   &object, &pindex, &out_prot, &wflags);
	if (rv != KERN_SUCCESS)
		return EINVAL;

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */
	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */
	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference(object);	/* XXX */

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
		  bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif
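/*
 * (The live equivalent of the disabled code above is procfs_domem(),
 *  which the PT_READ_*, PT_WRITE_* and PT_IO cases in kern_ptrace()
 *  below use to access the target's address space.)
 */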

/*
 * Process debugging system call.
 *
 * MPALMOSTSAFE
 */
int
sys_ptrace(struct ptrace_args *uap)
{
	struct proc *p = curproc;

	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
#endif
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
	}
	if (error)
		return (error);

	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
			&uap->sysmsg_result);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
#endif
	}

	return (error);
}
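
/*
 * Illustrative userland sketch (not part of this file): a debugger
 * typically drives the requests handled here roughly as follows,
 * assuming a valid target pid and <sys/ptrace.h>:
 *
 *	ptrace(PT_ATTACH, pid, (caddr_t)0, 0);	  target is sent SIGSTOP
 *	waitpid(pid, &status, 0);		  wait for the stop
 *	w = ptrace(PT_READ_D, pid, taddr, 0);	  read one word at taddr
 *	ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);  resume; addr of 1 means
 *						  "keep PC", data of 0 means
 *						  "deliver no signal"
 */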
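/*
 * kern_ptrace() - in-kernel back end for ptrace(2).
 *
 * Validates permission for the given request against the target pid and
 * then carries the request out.  Register buffers and the PT_IO
 * descriptor arrive in kernel space via addr (already copied in by
 * sys_ptrace()); the word fetched by PT_READ_I/PT_READ_D is returned
 * through *res.  Returns 0 on success or an errno value.
 */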
int
kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
	    int data, int *res)
{
	struct proc *p, *pp;
	struct lwp *lp;
	struct iovec iov;
	struct uio uio;
	struct ptrace_io_desc *piod;
	int error = 0;
	int write, tmp;
	int t;

	write = 0;
	if (req == PT_TRACE_ME) {
		p = curp;
		PHOLD(p);
	} else {
		if ((p = pfind(pid)) == NULL)
			return ESRCH;
	}
	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
		PRELE(p);
		return (ESRCH);
	}
	if (p->p_flags & P_SYSTEM) {
		PRELE(p);
		return EINVAL;
	}

	lwkt_gettoken(&p->p_token);
	/* Can't trace a process that's currently exec'ing. */
	if ((p->p_flags & P_INEXEC) != 0) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EAGAIN;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flags & P_TRACED) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		if (curp->p_flags & P_TRACED)
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
				if (pp == p) {
					lwkt_reltoken(&p->p_token);
					PRELE(p);
					return (EINVAL);
				}

		/* not owned by you, or has done setuid (unless you're root) */
		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
		     (p->p_flags & P_SUGID)) {
			if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flags & P_TRACED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP ||
		    (p->p_flags & P_WAITED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* OK */
		break;

	default:
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(lp);
#endif

	/*
	 * Actually do the requests
	 */

	*res = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		proc_reparent(p, curp);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		LWPHOLD(lp);

		if (req == PT_STEP) {
			if ((error = ptrace_single_step (lp))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		if (addr != (void *)1) {
			if ((error = ptrace_set_pc (lp,
			    (u_long)(uintfptr_t)addr))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}
		LWPRELE(lp);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				if (pp) {
					proc_reparent(p, pp);
					PRELE(pp);
				}
			}

			p->p_flags &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/*
		 * Deliver or queue signal.  If the process is stopped
		 * force it to be SACTIVE again.
		 */
		crit_enter();
		if (p->p_stat == SSTOP) {
			p->p_xstat = data;
			proc_unstop(p, SSTOP);
		} else if (data) {
			ksignal(p, data);
		}
		crit_exit();
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process (though for a SYSSPACE transfer it doesn't
		 * really matter).
		 */
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = curthread;
		error = procfs_domem(curp, lp, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			*res = tmp;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_IO:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process.
		 */
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = curthread;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (EINVAL);
		}
		error = procfs_domem(curp, lp, NULL, &uio);
		piod->piod_len -= uio.uio_resid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_GETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_doregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_GETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dofpregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_GETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dodbregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return 0;
}

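/*
 * trace_req() - always allows the trace request; this is currently a
 * stub that unconditionally returns 1.
 */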
int
trace_req(struct proc *p)
{
	return 1;
}

/*
 * stopevent()
 *
 * Stop a process because of a procfs event.  Stay stopped until p->p_step
 * is cleared (cleared by PIOCCONT in procfs).
 *
 * MPSAFE
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			spin_unlock(&p->p_spin);
			break;
		}
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}
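
/*
 * NOTE: the other half of this handshake lives in procfs (see
 * vfs/procfs): a PIOCWAIT-style waiter is expected to sleep on
 * p->p_stype (woken by the wakeup() in the loop above), and PIOCCONT
 * resumes us by clearing p->p_step and issuing a wakeup(&p->p_step).
 */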