xref: /dragonfly/sys/kern/sys_process.c (revision a9783bc6)
1 /*
2  * Copyright (c) 1994, Sean Eric Fagan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Sean Eric Fagan.
16  * 4. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
32  */
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/sysproto.h>
37 #include <sys/uio.h>
38 #include <sys/proc.h>
39 #include <sys/priv.h>
40 #include <sys/vnode.h>
41 #include <sys/ptrace.h>
42 #include <sys/reg.h>
43 #include <sys/lock.h>
44 
45 #include <vm/vm.h>
46 #include <vm/pmap.h>
47 #include <vm/vm_map.h>
48 #include <vm/vm_page.h>
49 
50 #include <vfs/procfs/procfs.h>
51 
52 #include <sys/thread2.h>
53 #include <sys/spinlock2.h>
54 
55 /* use the equivalent procfs code */
56 #if 0
/*
 * pread - (DISABLED, under #if 0) read one word from the target process's
 * address space by temporarily mapping the backing page into kernel_map.
 *
 * procp:  target process whose vmspace is read
 * addr:   user virtual address in the target process
 * retval: receives the 32-bit word at addr
 *
 * Returns 0 on success, EINVAL if the address does not resolve, or a
 * vm_map_* KERN_* code on mapping failure (note the mixed error
 * namespaces; this code is superseded by procfs_domem()).
 *
 * NOTE(review): the stray "XXX" token in the vm_object_reference call
 * appears to have been left deliberately so this block cannot compile
 * if re-enabled without review.
 */
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval)
{
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_map_backing_t ba;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/* tmap is used because vm_map_lookup() may change the map argument */
	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
			   &ba, &pindex, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;


	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference XXX (object);

		/* wire the page so the bcopy below cannot fault */
		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		/* unmap (and unwire) the temporary kernel mapping */
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}
114 
/*
 * pwrite - (DISABLED, under #if 0) write one word into the target
 * process's address space, temporarily forcing the page writable and
 * mapping it into kernel_map.
 *
 * procp: target process whose vmspace is written
 * addr:  user virtual address in the target process
 * datum: 32-bit value to store at addr
 *
 * Returns 0 on success, EINVAL/EFAULT on lookup or fault failure, or a
 * vm_map_* KERN_* code (mixed error namespaces; superseded by
 * procfs_domem()).
 *
 * NOTE(review): contains the same intentional "XXX" compile-breaker as
 * pread() above.
 */
static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum)
{
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_map_backing_t ba;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;
	boolean_t	fix_prot = 0;	/* protection was widened, restore later */

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_ALL, 0)) != KERN_SUCCESS)
		  return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wflags, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
			   &ba, &pindex, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */
	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */
	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference XXX (object);

		/* wire the page so the bcopy below cannot fault */
		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
		  bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		/* unmap (and unwire) the temporary kernel mapping */
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	/* restore the original (assumed read/execute) protection */
	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
207 #endif
208 
209 /*
210  * Process debugging system call.
211  *
212  * MPALMOSTSAFE
213  */
214 int
215 sys_ptrace(struct ptrace_args *uap)
216 {
217 	struct proc *p = curproc;
218 
219 	/*
220 	 * XXX this obfuscation is to reduce stack usage, but the register
221 	 * structs may be too large to put on the stack anyway.
222 	 */
223 	union {
224 		struct ptrace_io_desc piod;
225 		struct dbreg dbreg;
226 		struct fpreg fpreg;
227 		struct reg reg;
228 	} r;
229 	void *addr;
230 	int error = 0;
231 
232 	addr = &r;
233 	switch (uap->req) {
234 	case PT_GETREGS:
235 	case PT_GETFPREGS:
236 #ifdef PT_GETDBREGS
237 	case PT_GETDBREGS:
238 #endif
239 		break;
240 	case PT_SETREGS:
241 		error = copyin(uap->addr, &r.reg, sizeof r.reg);
242 		break;
243 	case PT_SETFPREGS:
244 		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
245 		break;
246 #ifdef PT_SETDBREGS
247 	case PT_SETDBREGS:
248 		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
249 		break;
250 #endif
251 	case PT_IO:
252 		error = copyin(uap->addr, &r.piod, sizeof r.piod);
253 		break;
254 	default:
255 		addr = uap->addr;
256 	}
257 	if (error)
258 		return (error);
259 
260 	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
261 			&uap->sysmsg_result);
262 	if (error)
263 		return (error);
264 
265 	switch (uap->req) {
266 	case PT_IO:
267 		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
268 		break;
269 	case PT_GETREGS:
270 		error = copyout(&r.reg, uap->addr, sizeof r.reg);
271 		break;
272 	case PT_GETFPREGS:
273 		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
274 		break;
275 #ifdef PT_GETDBREGS
276 	case PT_GETDBREGS:
277 		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
278 		break;
279 #endif
280 	}
281 
282 	return (error);
283 }
284 
/*
 * kern_ptrace - in-kernel worker for ptrace(2).
 *
 * curp: the tracing (calling) process
 * req:  PT_* request code
 * pid:  target process id (ignored for PT_TRACE_ME)
 * addr: request-specific address/pointer argument (kernel pointer for
 *       the register and PT_IO requests, see sys_ptrace())
 * data: request-specific integer argument (e.g. signal number)
 * res:  receives the request's result value (e.g. the word read by
 *       PT_READ_I/D)
 *
 * The target is looked up and held (PHOLD for self; pfind() presumably
 * returns the process already held -- every exit path pairs with a
 * PRELE), and its p_token is acquired for the duration of the request.
 * EVERY return path below must release both the token and the hold.
 *
 * Returns 0 on success or an errno value.
 */
int
kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
	    int data, int *res)
{
	struct proc *p, *pp;
	struct lwp *lp;
	struct iovec iov;
	struct uio uio;
	struct ptrace_io_desc *piod;
	int error = 0;
	int write, tmp;
	int t;

	write = 0;
	if (req == PT_TRACE_ME) {
		p = curp;
		PHOLD(p);
	} else {
		if ((p = pfind(pid)) == NULL)
			return ESRCH;
	}
	/* jailed callers may only see processes in the same prison */
	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
		PRELE(p);
		return (ESRCH);
	}
	/* kernel (system) processes cannot be traced */
	if (p->p_flags & P_SYSTEM) {
		PRELE(p);
		return EINVAL;
	}

	lwkt_gettoken(&p->p_token);
	/* Can't trace a process that's currently exec'ing. */
	if ((p->p_flags & P_INEXEC) != 0) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EAGAIN;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flags & P_TRACED) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/*
		 * A traced process may not attach to any of its own
		 * ancestors (would create a tracing cycle).
		 */
		if (curp->p_flags & P_TRACED)
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
				if (pp == p) {
					lwkt_reltoken(&p->p_token);
					PRELE(p);
					return (EINVAL);
				}

		/* not owned by you, has done setuid (unless you're root) */
		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
		     (p->p_flags & P_SUGID)) {
			if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flags & P_TRACED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP ||
		    (p->p_flags & P_WAITED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* OK */
		break;

	default:
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

	/* XXX lwp: single-threaded assumption, operates on the first lwp only */
	lp = FIRST_LWP_IN_PROC(p);
	if (lp == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(lp);
#endif

	/*
	 * Actually do the requests
	 */

	*res = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		proc_reparent(p, curp);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		/* hold the lwp while we fiddle with its machine state */
		LWPHOLD(lp);

		if (req == PT_STEP) {
			if ((error = ptrace_single_step (lp))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* addr == (void *)1 means "continue from the current pc" */
		if (addr != (void *)1) {
			if ((error = ptrace_set_pc (lp, (u_long)addr))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}
		LWPRELE(lp);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				if (pp) {
					proc_reparent(p, pp);
					PRELE(pp);
				}
			}

			p->p_flags &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/*
		 * Deliver or queue signal.  If the process is stopped
		 * force it to be SACTIVE again.  Reached directly from
		 * PT_STEP/PT_CONTINUE/PT_DETACH and via goto from
		 * PT_ATTACH (data = SIGSTOP) and PT_KILL (data = SIGKILL).
		 */
		crit_enter();
		if (p->p_stat == SSTOP) {
			p->p_xstat = data;
			proc_unstop(p, SSTOP);
		} else if (data) {
			ksignal(p, data);
		}
		crit_exit();
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process (though for a SYSSPACE transfer it doesn't
		 * really matter).
		 */
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = curthread;
		error = procfs_domem(curp, lp, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			*res = tmp;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_IO:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process.
		 */
		piod = addr;	/* kernel copy made by sys_ptrace() */
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = curthread;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (EINVAL);
		}
		error = procfs_domem(curp, lp, NULL, &uio);
		/* report the number of bytes actually transferred */
		piod->piod_len -= uio.uio_resid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_GETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			/* addr points at the kernel struct reg scratch buffer */
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_doregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_GETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			/* addr points at the kernel struct fpreg scratch buffer */
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dofpregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_GETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			/* addr points at the kernel struct dbreg scratch buffer */
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dodbregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return 0;
}
722 
/*
 * Report whether a trace request on process p is permitted.  This
 * implementation unconditionally allows it.
 */
int
trace_req(struct proc *p)
{
	return (1);
}
728 
/*
 * stopevent()
 *
 * Stop a process because of a procfs event.  Stay stopped until p->p_step
 * is cleared (cleared by PIOCCONT in procfs).
 *
 * p:     process to stop (presumably curproc; stops the caller's context)
 * event: S_* procfs stop event bit, recorded in p_stype
 * val:   event-specific value, recorded in p_xstat
 *
 * MPSAFE
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	/*
	 * Interlock the sleep BEFORE dropping the spinlock so a wakeup
	 * between unlock and tsleep cannot be lost.
	 */
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			spin_unlock(&p->p_spin);
			break;
		}
		/* re-arm the interlock before releasing the spinlock */
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}
771 
772