/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
 * $DragonFly: src/sys/kern/sys_process.c,v 1.30 2007/02/19 01:14:23 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>
#include <vfs/procfs/procfs.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/* use the equivalent procfs code */
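/*
 * The pread()/pwrite() helpers below are retained under "#if 0" for
 * reference only; the live PT_READ, PT_WRITE and PT_IO paths in
 * kern_ptrace() go through procfs_domem() instead.
 */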
#if 0
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval)
{
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_map_backing_t ba;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
			   &ba, &pindex, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference (object);	/* XXX */

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum)
{
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_map_backing_t ba;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;
	boolean_t	fix_prot = 0;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_ALL, 0)) != KERN_SUCCESS)
		  return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, and wflags
	 * aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
			   &ba, &pindex, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */
	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */
	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference (object);	/* XXX */

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
		  bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif

/*
 * Process debugging system call.
 *
 * MPALMOSTSAFE
 */
int
sys_ptrace(struct ptrace_args *uap)
{
	struct proc *p = curproc;

	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

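	/*
	 * Requests that take a structure argument are copied into the
	 * local union before calling kern_ptrace(); results are copied
	 * back out to uap->addr afterward.  Everything else passes the
	 * user address through unchanged.
	 */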
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
#endif
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
	}
	if (error)
		return (error);

	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
			&uap->sysmsg_result);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
#endif
	}

	return (error);
}

int
kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
	    int data, int *res)
{
	struct proc *p, *pp;
	struct lwp *lp;
	struct iovec iov;
	struct uio uio;
	struct ptrace_io_desc *piod;
	int error = 0;
	int write, tmp;
	int t;

	write = 0;
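	/*
	 * Both branches below leave the target process held (pfind()
	 * returns it held, and we PHOLD() ourselves for PT_TRACE_ME),
	 * so every return path from here on must PRELE() it.
	 */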
	if (req == PT_TRACE_ME) {
		p = curp;
		PHOLD(p);
	} else {
		if ((p = pfind(pid)) == NULL)
			return ESRCH;
	}
	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
		PRELE(p);
		return (ESRCH);
	}
	if (p->p_flags & P_SYSTEM) {
		PRELE(p);
		return EINVAL;
	}

	lwkt_gettoken(&p->p_token);
	/* Can't trace a process that's currently exec'ing. */
	if ((p->p_flags & P_INEXEC) != 0) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EAGAIN;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flags & P_TRACED) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

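		/*
		 * If we are ourselves being traced, refuse to attach to
		 * any of our own ancestors; that would create a tracing
		 * loop.
		 */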
		if (curp->p_flags & P_TRACED)
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
				if (pp == p) {
					lwkt_reltoken(&p->p_token);
					PRELE(p);
					return (EINVAL);
				}

		/* not owned by you, or has done setuid (unless you're root) */
		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
		     (p->p_flags & P_SUGID)) {
			if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flags & P_TRACED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP ||
		    (p->p_flags & P_WAITED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* OK */
		break;

	default:
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(lp);
#endif

	/*
	 * Actually do the requests
	 */

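	/*
	 * *res carries the secondary return value back to sys_ptrace();
	 * for PT_READ_I and PT_READ_D it holds the word that was read,
	 * everything else leaves it zero.
	 */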
	*res = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		proc_reparent(p, curp);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		LWPHOLD(lp);

		if (req == PT_STEP) {
			if ((error = ptrace_single_step (lp))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

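		/*
		 * By ptrace(2) convention an addr of 1 means "resume at
		 * the point where execution stopped"; any other value
		 * sets the program counter explicitly.
		 */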
		if (addr != (void *)1) {
			if ((error = ptrace_set_pc (lp,
			    (u_long)(uintfptr_t)addr))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}
		LWPRELE(lp);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				if (pp) {
					proc_reparent(p, pp);
					PRELE(pp);
				}
			}

			p->p_flags &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/*
		 * Deliver or queue signal.  If the process is stopped
		 * force it to be SACTIVE again.
		 */
		crit_enter();
		if (p->p_stat == SSTOP) {
			p->p_xstat = data;
			proc_unstop(p, SSTOP);
		} else if (data) {
			ksignal(p, data);
		}
		crit_exit();
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process (though for a SYSSPACE transfer it doesn't
		 * really matter).
		 */
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = curthread;
		error = procfs_domem(curp, lp, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			*res = tmp;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_IO:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process.
		 */
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = curthread;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (EINVAL);
		}
		error = procfs_domem(curp, lp, NULL, &uio);
		piod->piod_len -= uio.uio_resid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

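	/*
	 * The three register groups below share one pattern: build a
	 * SYSSPACE uio over the buffer supplied by sys_ptrace() (the
	 * kernel-side union) and let procfs_doregs(), procfs_dofpregs()
	 * or procfs_dodbregs() do the transfer in the requested
	 * direction.
	 */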
#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_GETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_doregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_GETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dofpregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_GETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dodbregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return 0;
}

int
trace_req(struct proc *p)
{
	return 1;
}

/*
 * stopevent()
 *
 * Stop a process because of a procfs event.  Stay stopped until p->p_step
 * is cleared (cleared by PIOCCONT in procfs).
 *
 * MPSAFE
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
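	/*
	 * The tsleep_interlock() taken before dropping p_spin, paired
	 * with PINTERLOCKED in the tsleep() below, closes the window in
	 * which a wakeup on &p->p_step could otherwise be lost between
	 * releasing the spinlock and going to sleep.
	 */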
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			spin_unlock(&p->p_spin);
			break;
		}
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}