/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/* Portions Copyright 2013 Justin Hibbits */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/fasttrap_isa.h>
#include <sys/fasttrap_impl.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <cddl/dev/dtrace/dtrace_cddl.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/ptrace.h>
#include <sys/sysent.h>

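/*
 * Field extraction helpers for fixed-width PowerPC instructions. The shift
 * amounts below count from the least significant bit, while the ISA numbers
 * bits from the most significant (bit 0): the primary opcode occupies bits
 * 0-5, the extended opcode (XO) bits 21-30, and the BO/BI and RS/RA/RB
 * operand fields bits 6-10, 11-15 and 16-20 respectively.
 */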
#define OP(x)	((x) >> 26)
#define OPX(x)	(((x) >> 1) & 0x3FF)
#define OP_BO(x) (((x) & 0x03E00000) >> 21)
#define OP_BI(x) (((x) & 0x001F0000) >> 16)
#define OP_RS(x) (((x) & 0x03E00000) >> 21)
#define OP_RA(x) (((x) & 0x001F0000) >> 16)
#define OP_RB(x) (((x) & 0x0000F800) >> 11)

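/*
 * Copy data to or from the traced process's address space by wrapping the
 * request in a uio/iovec pair and handing it to proc_rwmem(). The process
 * is held across the operation so it cannot go away underneath us.
 */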
static int
proc_ops(int op, proc_t *p, void *kaddr, off_t uaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = kaddr;
	iov.iov_len = len;
	uio.uio_offset = uaddr;
	uio.uio_iov = &iov;
	uio.uio_resid = len;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	uio.uio_rw = op;
	PHOLD(p);
	if (proc_rwmem(p, &uio) != 0) {
		PRELE(p);
		return (-1);
	}
	PRELE(p);

	return (0);
}

static int
uread(proc_t *p, void *kaddr, size_t len, uintptr_t uaddr)
{

	return (proc_ops(UIO_READ, p, kaddr, uaddr, len));
}

static int
uwrite(proc_t *p, void *kaddr, size_t len, uintptr_t uaddr)
{

	return (proc_ops(UIO_WRITE, p, kaddr, uaddr, len));
}

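/*
 * Arm a tracepoint by overwriting the instruction at ftt_pc with the
 * fasttrap trap instruction (FASTTRAP_INSTR); the original instruction was
 * saved in ftt_instr when the tracepoint was initialized.
 */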
int
fasttrap_tracepoint_install(proc_t *p, fasttrap_tracepoint_t *tp)
{
	fasttrap_instr_t instr = FASTTRAP_INSTR;

	if (uwrite(p, &instr, 4, tp->ftt_pc) != 0)
		return (-1);

	return (0);
}

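/*
 * Restore the original instruction at the tracepoint. If the instruction
 * cannot be read back, or is no longer our trap instruction, there is
 * nothing to undo.
 */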
int
fasttrap_tracepoint_remove(proc_t *p, fasttrap_tracepoint_t *tp)
{
	uint32_t instr;

	/*
	 * Distinguish between read or write failures and a changed
	 * instruction.
	 */
	if (uread(p, &instr, 4, tp->ftt_pc) != 0)
		return (0);
	if (instr != FASTTRAP_INSTR)
		return (0);
	if (uwrite(p, &tp->ftt_instr, 4, tp->ftt_pc) != 0)
		return (-1);

	return (0);
}

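/*
 * Initialize a tracepoint: read the original instruction out of the process
 * and classify it so that fasttrap_pid_probe() knows whether it can be
 * emulated in the kernel (branches, nops) or must be handled as a common
 * instruction.
 */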
int
fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, uintptr_t pc,
    fasttrap_probe_type_t type)
{
	uint32_t instr;

	/*
	 * Read the instruction at the given address out of the process's
	 * address space. We don't have to worry about a debugger
	 * changing this instruction before we overwrite it with our trap
	 * instruction since P_PR_LOCK is set.
	 */
	if (uread(p, &instr, 4, pc) != 0)
		return (-1);

	/*
	 * Decode the instruction to fill in the probe flags. We can have
	 * the process execute most instructions on its own using a pc/npc
	 * trick, but pc-relative control transfers present a problem since
	 * we're relocating the instruction. We emulate these instructions
	 * in the kernel. We assume a default type and overwrite that as
	 * needed.
	 *
	 * pc-relative instructions must be emulated for correctness;
	 * other instructions (which represent a large set of commonly traced
	 * instructions) are emulated or otherwise optimized for performance.
	 */
	tp->ftt_type = FASTTRAP_T_COMMON;
	tp->ftt_instr = instr;

	switch (OP(instr)) {
	/* The following are invalid for trapping (invalid opcodes, tw/twi). */
	case 0:
	case 1:
	case 2:
	case 4:
	case 5:
	case 6:
	case 30:
	case 39:
	case 58:
	case 62:
	case 3:	/* twi */
		return (-1);
	case 31:	/* tw */
		if (OPX(instr) == 4)
			return (-1);
		else if (OPX(instr) == 444 && OP_RS(instr) == OP_RA(instr) &&
		    OP_RS(instr) == OP_RB(instr))
			tp->ftt_type = FASTTRAP_T_NOP;	/* mr rX,rX is a nop. */
		break;
	case 16:
		tp->ftt_type = FASTTRAP_T_BC;
		tp->ftt_dest = instr & 0x0000FFFC; /* Extract target address */
		if (instr & 0x00008000)
			tp->ftt_dest |= 0xFFFF0000;
		/* Use as offset if not absolute address. */
		if (!(instr & 0x02))
			tp->ftt_dest += pc;
		tp->ftt_bo = OP_BO(instr);
		tp->ftt_bi = OP_BI(instr);
		break;
	case 18:
		tp->ftt_type = FASTTRAP_T_B;
		tp->ftt_dest = instr & 0x03FFFFFC; /* Extract target address */
		if (instr & 0x02000000)
			tp->ftt_dest |= 0xFC000000;
		/* Use as offset if not absolute address. */
		if (!(instr & 0x02))
			tp->ftt_dest += pc;
		break;
	case 19:
		switch (OPX(instr)) {
		case 528:	/* bcctr */
			tp->ftt_type = FASTTRAP_T_BCTR;
			tp->ftt_bo = OP_BO(instr);
			tp->ftt_bi = OP_BI(instr);
			break;
		case 16:	/* bclr */
			tp->ftt_type = FASTTRAP_T_BLR;
			tp->ftt_bo = OP_BO(instr);
			tp->ftt_bi = OP_BI(instr);
			break;
		}
		break;
	case 24:	/* ori */
		if (OP_RS(instr) == OP_RA(instr) &&
		    (instr & 0x0000FFFF) == 0)
			tp->ftt_type = FASTTRAP_T_NOP;
		break;
	}

	/*
	 * We don't know how this tracepoint is going to be used, but in case
	 * it's used as part of a function return probe, we need to indicate
	 * whether it's always a return site or only potentially a return
	 * site. If it's part of a return probe, it's always going to be a
	 * return from that function if it's a restore instruction or if
	 * the previous instruction was a return. If we could reliably
	 * distinguish jump tables from return sites, this wouldn't be
	 * necessary.
	 */
#if 0
	if (tp->ftt_type != FASTTRAP_T_RESTORE &&
	    (uread(p, &instr, 4, pc - sizeof (instr)) != 0 ||
	    !(OP(instr) == 2 && OP3(instr) == OP3_RETURN)))
		tp->ftt_flags |= FASTTRAP_F_RETMAYBE;
#endif

	return (0);
}

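/*
 * Fetch argument 'argno' of the function in which the probe fired. Both
 * PowerPC ABIs pass the first eight integer arguments in r3-r10; later
 * arguments are read from the stack with faults suppressed.
 */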
static uint64_t
fasttrap_anarg(struct reg *rp, int argno)
{
	uint64_t value;
	proc_t *p = curproc;

	/* The first 8 arguments are in registers r3-r10. */
	if (argno < 8)
		return (rp->fixreg[argno + 3]);

	/*
	 * Remaining arguments are on the stack: past the back chain and
	 * saved LR (SP + 8) for 32-bit processes, and in the parameter
	 * save area starting at SP + 48 for 64-bit processes.
	 */
	if (SV_PROC_FLAG(p, SV_ILP32)) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fuword32((void *)(rp->fixreg[1] + 8 +
		    ((argno - 8) * sizeof(uint32_t))));
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);
	} else {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fuword64((void *)(rp->fixreg[1] + 48 +
		    ((argno - 8) * sizeof(uint64_t))));
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);
	}
	return (value);
}

uint64_t
fasttrap_pid_getarg(void *arg, dtrace_id_t id, void *parg, int argno,
    int aframes)
{
	struct reg r;

	fill_regs(curthread, &r);

	return (fasttrap_anarg(&r, argno));
}

uint64_t
fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno,
    int aframes)
{
	struct reg r;

	fill_regs(curthread, &r);

	return (fasttrap_anarg(&r, argno));
}

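/*
 * Gather the native arguments for a USDT probe, translating each probe
 * argument index through ftp_argmap, and zero any remaining probe
 * arguments beyond those the probe declares.
 */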
static void
fasttrap_usdt_args(fasttrap_probe_t *probe, struct reg *rp, int argc,
    uintptr_t *argv)
{
	int i, x, cap = MIN(argc, probe->ftp_nargs);

	for (i = 0; i < cap; i++) {
		x = probe->ftp_argmap[i];

		/* As in fasttrap_anarg(), arguments 0-7 live in r3-r10. */
		if (x < 8)
			argv[i] = rp->fixreg[x + 3];
		else if (SV_PROC_FLAG(curproc, SV_ILP32))
			argv[i] = fuword32((void *)(rp->fixreg[1] + 8 +
			    ((x - 8) * sizeof(uint32_t))));
		else
			argv[i] = fuword64((void *)(rp->fixreg[1] + 48 +
			    ((x - 8) * sizeof(uint64_t))));
	}

	for (; i < argc; i++) {
		argv[i] = 0;
	}
}

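/*
 * Fire the return probes associated with the tracepoint at 'pc'. It is
 * called with the post-instruction pc ('new_pc') so that branches that
 * stay within the traced function can be ignored.
 */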
static void
fasttrap_return_common(struct reg *rp, uintptr_t pc, pid_t pid,
    uintptr_t new_pc)
{
	fasttrap_tracepoint_t *tp;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * Don't sweat it if we can't find the tracepoint again; unlike
	 * when we're in fasttrap_pid_probe(), finding the tracepoint here
	 * is not essential to the correct execution of the process.
	 */
	if (tp == NULL) {
		return;
	}

	for (id = tp->ftt_retids; id != NULL; id = id->fti_next) {
		/*
		 * If there's a branch that could act as a return site, we
		 * need to trace it, and check here whether the program
		 * counter is external to the function. Skip branches that
		 * stay within the function.
		 */
		if ((new_pc - id->fti_probe->ftp_faddr) <
		    id->fti_probe->ftp_fsize)
			continue;

		dtrace_probe(id->fti_probe->ftp_id,
		    pc - id->fti_probe->ftp_faddr,
		    rp->fixreg[3], rp->fixreg[4], 0, 0);
	}
}
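/*
 * Evaluate the BO/BI fields of a conditional branch against the saved
 * registers, decrementing CTR when the encoding calls for it. In the BO
 * field, 0x10 means "ignore the condition register", 0x08 selects the CR
 * bit value to branch on, 0x04 means "do not decrement CTR", and 0x02
 * selects branching on CTR == 0 rather than CTR != 0. For example, bdnz
 * uses BO 0b10000 (decrement CTR, branch while it is non-zero), and blt
 * uses BO 0b01100 with BI 0 (branch when CR0[LT] is set).
 */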
static int
fasttrap_branch_taken(int bo, int bi, struct reg *regs)
{
	int ctr_ok, cond_ok;

	/* Branch always? */
	if ((bo & 0x14) == 0x14)
		return (1);

	/* Decrement and test the count register unless BO says not to. */
	if (!(bo & 0x04)) {
		--regs->ctr;
		ctr_ok = (regs->ctr != 0) ^ ((bo >> 1) & 1);
	} else
		ctr_ok = 1;

	/* Test the selected condition register bit unless BO says not to. */
	if (!(bo & 0x10))
		cond_ok = !(((regs->cr >> (31 - bi)) & 1) ^ ((bo >> 3) & 1));
	else
		cond_ok = 1;

	return (ctr_ok && cond_ok);
}
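/*
 * Main entry point for the pid provider: called when a thread hits a
 * fasttrap breakpoint. Look up the tracepoint for this pid/pc pair, fire
 * any probes attached to it, emulate the displaced instruction, and
 * advance the program counter accordingly.
 */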
int
fasttrap_pid_probe(struct reg *rp)
{
	proc_t *p = curproc;
	uintptr_t pc = rp->pc;
	uintptr_t new_pc = 0;
	fasttrap_bucket_t *bucket;
	fasttrap_tracepoint_t *tp, tp_local;
	pid_t pid;
	dtrace_icookie_t cookie;
	uint_t is_enabled = 0;

	/*
	 * It's possible that a user (in a veritable orgy of bad planning)
	 * could redirect this thread's flow of control before it reached the
	 * return probe fasttrap. In this case we need to kill the process
	 * since it's in an unrecoverable state.
	 */
	if (curthread->t_dtrace_step) {
		ASSERT(curthread->t_dtrace_on);
		fasttrap_sigtrap(p, curthread, pc);
		return (0);
	}

	/*
	 * Clear all user tracing flags.
	 */
	curthread->t_dtrace_ft = 0;
	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;

	PROC_LOCK(p);
	pid = p->p_pid;
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * Look up the tracepoint that the process just hit.
	 */
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * If we couldn't find a matching tracepoint, either a tracepoint has
	 * been inserted without using the pid<pid> ioctl interface (see
	 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
	 */
	if (tp == NULL) {
		PROC_UNLOCK(p);
		return (-1);
	}

	if (tp->ftt_ids != NULL) {
		fasttrap_id_t *id;

		for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
			fasttrap_probe_t *probe = id->fti_probe;

			if (id->fti_ptype == DTFTP_ENTRY) {
				/*
				 * We note that this was an entry
				 * probe to help ustack() find the
				 * first caller.
				 */
				cookie = dtrace_interrupt_disable();
				DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
				dtrace_probe(probe->ftp_id, rp->fixreg[3],
				    rp->fixreg[4], rp->fixreg[5], rp->fixreg[6],
				    rp->fixreg[7]);
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
				dtrace_interrupt_enable(cookie);
			} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
				/*
				 * Note that in this case, we don't
				 * call dtrace_probe() since it's only
				 * an artificial probe meant to change
				 * the flow of control so that it
				 * encounters the true probe.
				 */
				is_enabled = 1;
			} else if (probe->ftp_argmap == NULL) {
				dtrace_probe(probe->ftp_id, rp->fixreg[3],
				    rp->fixreg[4], rp->fixreg[5], rp->fixreg[6],
				    rp->fixreg[7]);
			} else {
				uintptr_t t[5];

				fasttrap_usdt_args(probe, rp,
				    sizeof (t) / sizeof (t[0]), t);

				dtrace_probe(probe->ftp_id, t[0], t[1],
				    t[2], t[3], t[4]);
			}
		}
	}

	/*
	 * We're about to do a bunch of work so we cache a local copy of
	 * the tracepoint to emulate the instruction, and then find the
	 * tracepoint again later if we need to light up any return probes.
	 */
	tp_local = *tp;
	PROC_UNLOCK(p);
	tp = &tp_local;

	/*
	 * If there's an is-enabled probe connected to this tracepoint, it
	 * means that a 'xor r3, r3, r3' instruction was placed there by
	 * DTrace when the binary was linked. As this probe is, in fact,
	 * enabled, we need to stuff 1 into r3. Accordingly, we can bypass
	 * all the instruction emulation logic since we know the inevitable
	 * result. It's possible that a user could construct a scenario where
	 * the 'is-enabled' probe was on some other instruction, but that
	 * would be a rather exotic way to shoot oneself in the foot.
	 */
	if (is_enabled) {
		rp->fixreg[3] = 1;
		new_pc = rp->pc + 4;
		goto done;
	}

	switch (tp->ftt_type) {
	case FASTTRAP_T_NOP:
		new_pc = rp->pc + 4;
		break;
	case FASTTRAP_T_BC:
		if (!fasttrap_branch_taken(tp->ftt_bo, tp->ftt_bi, rp)) {
			/* Branch not taken; resume at the next instruction. */
			new_pc = rp->pc + 4;
			break;
		}
		/* FALLTHROUGH */
	case FASTTRAP_T_B:
		if (tp->ftt_instr & 0x01)
			rp->lr = rp->pc + 4;
		new_pc = tp->ftt_dest;
		break;
	case FASTTRAP_T_BLR:
	case FASTTRAP_T_BCTR:
		if (!fasttrap_branch_taken(tp->ftt_bo, tp->ftt_bi, rp)) {
			/* Branch not taken; resume at the next instruction. */
			new_pc = rp->pc + 4;
			break;
		}
		if (tp->ftt_type == FASTTRAP_T_BCTR)
			new_pc = rp->ctr;
		else
			new_pc = rp->lr;
		if (tp->ftt_instr & 0x01)
			rp->lr = rp->pc + 4;
		break;
	case FASTTRAP_T_COMMON:
		break;
	}
done:
	/*
	 * If there were no return probes when we first found the tracepoint,
	 * we should feel no obligation to honor any return probes that were
	 * subsequently enabled -- they'll just have to wait until the next
	 * time around.
	 */
	if (tp->ftt_retids != NULL) {
		/*
		 * We need to wait until the results of the instruction are
		 * apparent before invoking any return probes. If this
		 * instruction was emulated we can just call
		 * fasttrap_return_common(); if it needs to be executed, we
		 * need to wait until the user thread returns to the kernel.
		 */
		if (tp->ftt_type != FASTTRAP_T_COMMON) {
			fasttrap_return_common(rp, pc, pid, new_pc);
		} else {
			ASSERT(curthread->t_dtrace_ret != 0);
			ASSERT(curthread->t_dtrace_pc == pc);
			ASSERT(curthread->t_dtrace_scrpc != 0);
			ASSERT(new_pc == curthread->t_dtrace_astpc);
		}
	}

	rp->pc = new_pc;
	set_regs(curthread, rp);

	return (0);
}

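/*
 * Fire return probes once the traced instruction has actually executed.
 * The original pc and the next pc are taken from the per-thread DTrace
 * state (t_dtrace_pc/t_dtrace_npc).
 */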
int
fasttrap_return_probe(struct reg *rp)
{
	proc_t *p = curproc;
	uintptr_t pc = curthread->t_dtrace_pc;
	uintptr_t npc = curthread->t_dtrace_npc;

	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;

	/*
	 * We set rp->pc to the address of the traced instruction so
	 * that it appears to dtrace_probe() that we're on the original
	 * instruction, and so that the user can't easily detect our
	 * complex web of lies. dtrace_return_probe() (our caller)
	 * will correctly set %pc after we return.
	 */
	rp->pc = pc;

	fasttrap_return_common(rp, pc, p->p_pid, npc);

	return (0);
}