xref: /dragonfly/sys/kern/sys_process.c (revision c9c5aa9e)
/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmsg.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <vfs/procfs/procfs.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/* use the equivalent procfs code */
#if 0
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval)
{
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_map_backing_t ba;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;
	vm_pindex_t	pcount;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
			   &ba, &pindex, &pcount, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference(object);	/* XXX */

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum)
{
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_map_backing_t ba;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;
	vm_pindex_t	pcount;
	boolean_t	fix_prot = 0;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_ALL, 0)) != KERN_SUCCESS)
		  return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wflags, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
			   &ba, &pindex, &pcount, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */
	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */
	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference(object);	/* XXX */

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
		  bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif

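/*
 * Illustrative userland usage (a hedged sketch, not part of this file;
 * the pid, address, and helper name are placeholders): a debugger
 * typically attaches, waits for the SIGSTOP posted by PT_ATTACH, peeks
 * at a word of the target, then resumes and detaches.
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int
 *	attach_and_peek(pid_t pid, caddr_t addr, int *valp)
 *	{
 *		int status;
 *
 *		if (ptrace(PT_ATTACH, pid, NULL, 0) == -1)
 *			return (-1);
 *		waitpid(pid, &status, 0);	// target stops with SIGSTOP
 *		*valp = ptrace(PT_READ_D, pid, addr, 0);
 *		ptrace(PT_CONTINUE, pid, (caddr_t)1, 0); // resume at current pc
 *		return (ptrace(PT_DETACH, pid, (caddr_t)1, 0));
 *	}
 */
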
/*
 * Process debugging system call.
 *
 * MPALMOSTSAFE
 */
int
sys_ptrace(struct sysmsg *sysmsg, const struct ptrace_args *uap)
{
	struct proc *p = curproc;

	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
#endif
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
	}
	if (error)
		return (error);

	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
			&sysmsg->sysmsg_result);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
#endif
	}

	return (error);
}

int
kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
	    int data, int *res)
{
	struct proc *p, *pp;
	struct lwp *lp;
	struct iovec iov;
	struct uio uio;
	struct ptrace_io_desc *piod;
	int error = 0;
	int write, tmp;
	int t;

	write = 0;
	if (req == PT_TRACE_ME) {
		p = curp;
		PHOLD(p);
	} else {
		if ((p = pfind(pid)) == NULL)
			return ESRCH;
	}
	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
		PRELE(p);
		return (ESRCH);
	}
	if (p->p_flags & P_SYSTEM) {
		PRELE(p);
		return EINVAL;
	}

	lwkt_gettoken(&p->p_token);
	/* Can't trace a process that's currently exec'ing. */
	if ((p->p_flags & P_INEXEC) != 0) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EAGAIN;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flags & P_TRACED) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		if (curp->p_flags & P_TRACED)
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
				if (pp == p) {
					lwkt_reltoken(&p->p_token);
					PRELE(p);
					return (EINVAL);
				}

		/* not owned by you, has done setuid (unless you're root) */
		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
		     (p->p_flags & P_SUGID)) {
			if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flags & P_TRACED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP ||
		    (p->p_flags & P_WAITED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* OK */
		break;

	default:
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	if (lp == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(lp);
#endif

	/*
	 * Actually do the requests
	 */

	*res = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		proc_reparent(p, curp);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data >= _SIG_MAXSIG) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		LWPHOLD(lp);

		if (req == PT_STEP) {
			if ((error = ptrace_single_step (lp))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		if (addr != (void *)1) {
			if ((error = ptrace_set_pc (lp, (u_long)addr))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}
		LWPRELE(lp);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				if (pp) {
					proc_reparent(p, pp);
					PRELE(pp);
				}
			}

			p->p_flags &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/*
		 * Deliver or queue signal.  If the process is stopped
		 * force it to be SACTIVE again.
		 */
		crit_enter();
		if (p->p_stat == SSTOP) {
			p->p_xstat = data;
			proc_unstop(p, SSTOP);
		} else if (data) {
			ksignal(p, data);
		}
		crit_exit();
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process (though for a SYSSPACE transfer it doesn't
		 * really matter).
		 */
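		/*
		 * The word being transferred lives in kernel memory (tmp
		 * or data), hence the UIO_SYSSPACE setting below; contrast
		 * with PT_IO further down, which copies directly to/from
		 * the tracer's buffer and therefore uses UIO_USERSPACE.
		 */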
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = curthread;
		error = procfs_domem(curp, lp, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			*res = tmp;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

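	/*
	 * Hypothetical userland sketch of PT_IO (pid, target_addr, and nread
	 * are placeholders): the tracer fills in a ptrace_io_desc and, on
	 * success, reads back piod_len for the number of bytes transferred.
	 *
	 *	struct ptrace_io_desc piod;
	 *	char buf[128];
	 *	size_t nread;
	 *
	 *	piod.piod_op = PIOD_READ_D;
	 *	piod.piod_offs = target_addr;	// address in the traced process
	 *	piod.piod_addr = buf;		// buffer in the tracing process
	 *	piod.piod_len = sizeof(buf);
	 *	if (ptrace(PT_IO, pid, (caddr_t)&piod, 0) == 0)
	 *		nread = piod.piod_len;
	 */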
	case PT_IO:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process.
		 */
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = curthread;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (EINVAL);
		}
		error = procfs_domem(curp, lp, NULL, &uio);
		piod->piod_len -= uio.uio_resid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_GETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_doregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_GETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dofpregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_GETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dodbregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return 0;
}

int
trace_req(struct proc *p)
{
	return 1;
}

/*
 * stopevent()
 *
 * Stop a process because of a procfs event.  Stay stopped until p->p_step
 * is cleared (cleared by PIOCCONT in procfs).
 *
 * MPSAFE
 */
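/*
 * A sketch of the intended flow, assuming the usual procfs interaction:
 * a debugger sets event bits in p_stops through procfs (PIOCBIS); when
 * the traced process hits a matching event it calls stopevent() and
 * parks itself here.  A debugger blocked in PIOCWAIT sleeps on p_stype
 * and is woken by the loop below; it inspects p_stype/p_xstat and
 * eventually issues PIOCCONT, which clears p_step and releases the
 * process.
 */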
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			spin_unlock(&p->p_spin);
			break;
		}
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}
774