xref: /dragonfly/sys/kern/sys_process.c (revision 2b3f93ea)
1 /*
2  * Copyright (c) 1994, Sean Eric Fagan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Sean Eric Fagan.
16  * 4. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
32  */
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/sysmsg.h>
37 #include <sys/uio.h>
38 #include <sys/proc.h>
39 #include <sys/caps.h>
40 #include <sys/vnode.h>
41 #include <sys/ptrace.h>
42 #include <sys/reg.h>
43 #include <sys/lock.h>
44 
45 #include <vm/vm.h>
46 #include <vm/pmap.h>
47 #include <vm/vm_map.h>
48 #include <vm/vm_page.h>
49 
50 #include <vfs/procfs/procfs.h>
51 
52 #include <sys/thread2.h>
53 #include <sys/spinlock2.h>
54 
55 /* use the equivalent procfs code */
56 #if 0
/*
 * pread - (dead code, compiled out via #if 0) read one word from the
 * target process' address space.
 *
 * Looks up the page backing 'addr' in the target's vm_map, maps the
 * backing object into kernel_map, wires the page, and copies the
 * int-sized word at the page offset into *retval.
 *
 * Returns 0 on success, EINVAL if the lookup fails, or the non-zero
 * result of vm_map_find()/vm_map_kernel_wiring().
 *
 * NOTE(review): this function does not compile as written (see the
 * "XXX" below) and is intentionally disabled; the live implementation
 * uses the procfs path (procfs_domem) instead.
 */
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval)
{
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_map_backing_t ba;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;
	vm_pindex_t	pcount;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * vm_map_lookup() may change the map argument, so use a copy
	 * (tmap) to preserve the original.
	 */
	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
			   &ba, &pindex, &pcount, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;


	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		/* XXX: not valid C -- left broken; whole function is #if 0'd */
		vm_object_reference XXX (object);

		/* wire the pages */
		rv = vm_map_kernel_wiring(kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		/* Unmap (and unwire) the temporary kernel mapping. */
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}
116 
/*
 * pwrite - (dead code, compiled out via #if 0) write one word into the
 * target process' address space.
 *
 * If the target page is not writable, temporarily grants VM_PROT_ALL
 * (fix_prot), faults the page in, maps the backing object into
 * kernel_map, wires it, and copies 'datum' to the page offset.  The
 * original protection is approximated afterwards by restoring
 * read/execute.
 *
 * Returns 0 on success, EINVAL on lookup failure, EFAULT on
 * protection/fault failure, or the non-zero result of
 * vm_map_find()/vm_map_kernel_wiring().
 *
 * NOTE(review): does not compile as written (see "XXX" below); the live
 * implementation uses the procfs path (procfs_domem) instead.
 */
static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum)
{
	int		rv;
	vm_map_t	map, tmap;
	vm_object_t	object;
	vm_map_backing_t ba;
	vm_offset_t	kva = 0;
	int		page_offset;	/* offset into page */
	vm_offset_t	pageno;		/* page number */
	vm_map_entry_t	out_entry;
	vm_prot_t	out_prot;
	int		wflags;
	vm_pindex_t	pindex;
	vm_pindex_t	pcount;
	boolean_t	fix_prot = 0;	/* restore protection on the way out? */

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_ALL, 0)) != KERN_SUCCESS)
		  return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wflags, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
			   &ba, &pindex, &pcount, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */
	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */
	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		/* XXX: not valid C -- left broken; whole function is #if 0'd */
		vm_object_reference XXX (object);

		/* wire the pages */
		rv = vm_map_kernel_wiring(kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
		  bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		/* Unmap (and unwire) the temporary kernel mapping. */
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	/* Put the protection back the way it (approximately) was. */
	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
211 #endif
212 
/*
 * Process debugging system call.
 *
 * Thin wrapper around kern_ptrace(): stages register sets / the
 * ptrace_io_desc in a kernel-side union (copyin for the PT_SET* and
 * PT_IO requests), invokes kern_ptrace(), then copies results back
 * out for the PT_GET* and PT_IO requests.  For all other requests
 * uap->addr is passed through untouched.
 *
 * Returns 0 on success or an errno; the syscall result value is
 * stored via sysmsg->sysmsg_result.
 *
 * MPALMOSTSAFE
 */
int
sys_ptrace(struct sysmsg *sysmsg, const struct ptrace_args *uap)
{
	struct proc *p = curproc;

	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
		/* Output-only requests: nothing to copy in. */
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
#endif
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		/* All other requests take the user address verbatim. */
		addr = uap->addr;
	}
	if (error)
		return (error);

	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
			&sysmsg->sysmsg_result);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		/*
		 * Copy the descriptor (with piod_len updated by
		 * kern_ptrace) back out; the copyout result is
		 * deliberately ignored since the I/O itself succeeded.
		 */
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
#endif
	}

	return (error);
}
288 
/*
 * kern_ptrace - kernel backend for the ptrace() system call.
 *
 * curp is the tracing (current) process; req/pid/addr/data are the
 * ptrace arguments.  For PT_READ_* the word read is returned via
 * *res; for PT_IO 'addr' points at a kernel-resident ptrace_io_desc
 * whose piod_len is updated to the number of bytes transferred.
 *
 * Returns 0 on success or an errno.  The target process is held
 * (PHOLD/pfind) and its p_token acquired near the top; every return
 * path below must release both (lwkt_reltoken + PRELE).
 */
int
kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
	    int data, int *res)
{
	struct proc *p, *pp;
	struct lwp *lp;
	struct iovec iov;
	struct uio uio;
	struct ptrace_io_desc *piod;
	int error = 0;
	int write, tmp;
	int t;

	write = 0;
	if (req == PT_TRACE_ME) {
		/* Target is ourselves; take an explicit hold. */
		p = curp;
		PHOLD(p);
	} else {
		/* pfind() returns the process already held (PRELE to drop). */
		if ((p = pfind(pid)) == NULL)
			return ESRCH;
	}
	/* The target must be visible from our jail. */
	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
		PRELE(p);
		return (ESRCH);
	}
	/* System (kernel-internal) processes cannot be traced. */
	if (p->p_flags & P_SYSTEM) {
		PRELE(p);
		return EINVAL;
	}

	lwkt_gettoken(&p->p_token);
	/* Can't trace a process that's currently exec'ing. */
	if ((p->p_flags & P_INEXEC) != 0) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EAGAIN;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flags & P_TRACED) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* Refuse to attach to one of our own ancestors while traced. */
		if (curp->p_flags & P_TRACED)
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
				if (pp == p) {
					lwkt_reltoken(&p->p_token);
					PRELE(p);
					return (EINVAL);
				}

		/* not owned by you, has done setuid (unless you're root) */
		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
		     (p->p_flags & P_SUGID)) {
			error = caps_priv_check(curp->p_ucred,
						SYSCAP_RESTRICTEDROOT);
			if (error) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flags & P_TRACED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP ||
		    (p->p_flags & P_WAITED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* OK */
		break;

	default:
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

	/* XXX lwp - single-threaded assumption: operate on the first lwp */
	lp = FIRST_LWP_IN_PROC(p);
	if (lp == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(lp);
#endif

	/*
	 * Actually do the requests
	 */

	*res = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		proc_reparent(p, curp);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data >= _SIG_MAXSIG) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		LWPHOLD(lp);

		if (req == PT_STEP) {
			if ((error = ptrace_single_step (lp))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* addr == (void *)1 means "resume at the current pc". */
		if (addr != (void *)1) {
			if ((error = ptrace_set_pc (lp, (u_long)addr))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}
		LWPRELE(lp);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				if (pp) {
					proc_reparent(p, pp);
					PRELE(pp);
				}
			}

			p->p_flags &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/*
		 * Deliver or queue signal.  If the process is stopped
		 * force it to be SACTIVE again.
		 */
		crit_enter();
		if (p->p_stat == SSTOP) {
			p->p_xstat = data;
			proc_unstop(p, SSTOP);
		} else if (data) {
			ksignal(p, data);
		}
		crit_exit();
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process (though for a SYSSPACE transfer it doesn't
		 * really matter).
		 */
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = curthread;
		error = procfs_domem(curp, lp, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			*res = tmp;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_IO:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process.
		 */
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = curthread;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (EINVAL);
		}
		error = procfs_domem(curp, lp, NULL, &uio);
		/* Report the number of bytes actually transferred. */
		piod->piod_len -= uio.uio_resid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_SETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			/* addr is a kernel-resident struct reg (see sys_ptrace) */
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_doregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_SETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			/* addr is a kernel-resident struct fpreg (see sys_ptrace) */
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dofpregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_SETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			/* addr is a kernel-resident struct dbreg (see sys_ptrace) */
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dodbregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return 0;
}
728 
/*
 * trace_req - legacy permission hook for trace requests.
 *
 * Unconditionally grants the request; retained only so existing
 * callers keep working.
 */
int
trace_req(struct proc *p)
{
	/* Every request is permitted. */
	return (1);
}
734 
/*
 * stopevent()
 *
 * Stop a process because of a procfs event.  Stay stopped until p->p_step
 * is cleared (cleared by PIOCCONT in procfs).
 *
 * 'event' is recorded in p_stype and 'val' in p_xstat for the waiter
 * to inspect.  If the event bit is not set in p_stops the call is a
 * no-op.  All p_step/p_stype state is protected by p_spin, and the
 * tsleep_interlock/spin_unlock ordering closes the race between
 * setting p_step and sleeping on it.
 *
 * MPSAFE
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	/* Interlock before dropping the spinlock so no wakeup is lost. */
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			spin_unlock(&p->p_spin);
			break;
		}
		/* Spurious wakeup: re-interlock and sleep again. */
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}
777 
778