xref: /freebsd/sys/cddl/dev/dtrace/powerpc/dtrace_isa.c (revision 076ad2f8)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2012,2013 Justin Hibbits <jhibbits@freebsd.org>
 *
 * $FreeBSD$
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stack.h>
#include <sys/sysent.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/stack.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "regset.h"

/* Offset to the LR save word (ppc32). */
#define RETURN_OFFSET	4
/*
 * Offset to the LR save word (ppc64).  The CR save area sits between
 * the back chain and the LR save word.
 */
#define RETURN_OFFSET64	16

#ifdef __powerpc64__
#define OFFSET 4 /* Account for the TOC reload slot */
#else
#define OFFSET 0
#endif

#define INKERNEL(x)	((x) <= VM_MAX_KERNEL_ADDRESS && \
		(x) >= VM_MIN_KERNEL_ADDRESS)
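
/*
 * The stack walkers below rely on the standard PowerPC frame layout: the
 * word at the stack pointer is the back chain (the caller's frame), and the
 * caller's saved LR lives at RETURN_OFFSET (ppc32) or RETURN_OFFSET64
 * (ppc64) past the back chain.  For frames belonging to a trap handler, the
 * code below assumes the trap frame sits just past the minimal stack frame
 * header, which is where the 8-byte (ppc32) and 48-byte (ppc64) offsets
 * used later come from.
 */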

static __inline int
dtrace_sp_inkernel(uintptr_t sp, int aframes)
{
	vm_offset_t callpc;

#ifdef __powerpc64__
	callpc = *(vm_offset_t *)(sp + RETURN_OFFSET64);
#else
	callpc = *(vm_offset_t *)(sp + RETURN_OFFSET);
#endif
	if ((callpc & 3) || (callpc < 0x100))
		return (0);

	/*
	 * trapexit() and asttrapexit() are sentinels
	 * for kernel stack tracing.
	 *
	 * Special-case this for 'aframes == 0', because fbt sets aframes to the
	 * trap callchain depth, so we want to break out of it.
	 */
	if ((callpc + OFFSET == (vm_offset_t) &trapexit ||
	    callpc + OFFSET == (vm_offset_t) &asttrapexit) &&
	    aframes != 0)
		return (0);

	return (1);
}

static __inline uintptr_t
dtrace_next_sp(uintptr_t sp)
{
	vm_offset_t callpc;

#ifdef __powerpc64__
	callpc = *(vm_offset_t *)(sp + RETURN_OFFSET64);
#else
	callpc = *(vm_offset_t *)(sp + RETURN_OFFSET);
#endif

	/*
	 * trapexit() and asttrapexit() are sentinels for kernel stack
	 * tracing: a saved LR pointing at either one means this frame sits
	 * on top of a trap frame, so the next stack pointer is recovered
	 * from the saved trap state rather than from the back chain alone.
	 */
	if ((callpc + OFFSET == (vm_offset_t) &trapexit ||
	    callpc + OFFSET == (vm_offset_t) &asttrapexit))
	    /* Access the trap frame */
#ifdef __powerpc64__
		return (*(uintptr_t *)sp + 48 + sizeof(register_t));
#else
		return (*(uintptr_t *)sp + 8 + sizeof(register_t));
#endif

	return (*(uintptr_t *)sp);
}

static __inline uintptr_t
dtrace_get_pc(uintptr_t sp)
{
	vm_offset_t callpc;

#ifdef __powerpc64__
	callpc = *(vm_offset_t *)(sp + RETURN_OFFSET64);
#else
	callpc = *(vm_offset_t *)(sp + RETURN_OFFSET);
#endif

	/*
	 * trapexit() and asttrapexit() are sentinels for kernel stack
	 * tracing: a saved LR pointing at either one means this frame sits
	 * on top of a trap frame, so the pc is recovered from the saved
	 * trap state rather than from this frame's LR save word.
	 */
	if ((callpc + OFFSET == (vm_offset_t) &trapexit ||
	    callpc + OFFSET == (vm_offset_t) &asttrapexit))
	    /* Access the trap frame */
#ifdef __powerpc64__
		return (*(uintptr_t *)sp + 48 + offsetof(struct trapframe, lr));
#else
		return (*(uintptr_t *)sp + 8 + offsetof(struct trapframe, lr));
#endif

	return (callpc);
}

greg_t
dtrace_getfp(void)
{
	return (greg_t)__builtin_frame_address(0);
}

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	int depth = 0;
	uintptr_t osp, sp;
	vm_offset_t callpc;
	pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;

	osp = PAGE_SIZE;
	if (intrpc != 0)
		pcstack[depth++] = (pc_t) intrpc;

	aframes++;

	sp = dtrace_getfp();

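	/*
	 * Walk the back chain.  Each iteration records the saved LR of a
	 * frame that looks like a kernel frame; artificial frames (aframes)
	 * are skipped, and the last skipped frame is replaced with the
	 * recorded DTrace caller when one is available.
	 */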
	while (depth < pcstack_limit) {
		if (sp <= osp)
			break;

		if (!dtrace_sp_inkernel(sp, aframes))
			break;
		callpc = dtrace_get_pc(sp);

		if (aframes > 0) {
			aframes--;
			if ((aframes == 0) && (caller != 0)) {
				pcstack[depth++] = caller;
			}
		} else {
			pcstack[depth++] = callpc;
		}

		osp = sp;
		sp = dtrace_next_sp(sp);
	}

	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}

static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	proc_t *p = curproc;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

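	/*
	 * Walk the user stack through the back chain, reading each word with
	 * dtrace_fuword32()/dtrace_fuword64() so that a bad address sets the
	 * CPU fault flag instead of faulting the kernel.
	 */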
	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		if (SV_PROC_FLAG(p, SV_ILP32)) {
			pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
			sp = dtrace_fuword32((void *)sp);
		} else {
			pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
			sp = dtrace_fuword64((void *)sp);
		}
	}

	return (ret);
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

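	/* The trap frame holds the user pc (srr0) and stack pointer (r1). */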
	pc = tf->srr0;
	sp = tf->fixreg[1];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is still
		 * in the link register because the prologue has not saved
		 * it yet.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = tf->lr;
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->td_frame) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = tf->srr0;
	sp = tf->fixreg[1];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		if (SV_PROC_FLAG(p, SV_ILP32)) {
			pc = dtrace_fuword32((void *)sp);
		} else {
			pc = dtrace_fuword64((void *)sp);
		}
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#ifdef notyet	/* XXX signal stack */
	uintptr_t oldcontext;
	size_t s1, s2;
#endif

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->srr0;
	sp = tf->fixreg[1];

#ifdef notyet /* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;
	s1 = sizeof (struct xframe) + 2 * sizeof (long);
	s2 = s1 + sizeof (siginfo_t);
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (SV_PROC_FLAG(p, SV_ILP32)) {
			pc = dtrace_fuword32((void *)sp);
		} else {
			pc = dtrace_fuword64((void *)sp);
		}
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#ifdef notyet /* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			ucontext_t *ucp = (ucontext_t *)oldcontext;
			greg_t *gregs = ucp->uc_mcontext.gregs;

			sp = dtrace_fulword(&gregs[REG_FP]);
			pc = dtrace_fulword(&gregs[REG_PC]);

			oldcontext = dtrace_fulword(&ucp->uc_link);
		} else
#endif /* XXX */
		{
			if (SV_PROC_FLAG(p, SV_ILP32)) {
				pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
				sp = dtrace_fuword32((void *)sp);
			} else {
				pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
				sp = dtrace_fuword64((void *)sp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	uintptr_t *fp = (uintptr_t *)dtrace_getfp();
	uintptr_t *stack;
	int i;

	/*
	 * A total of 8 arguments are passed via registers; any argument with
	 * an index of 7 or lower is therefore in a register.
	 */
	int inreg = 7;

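	/*
	 * Unwind through the artificial frames between dtrace_probe() and
	 * the original call site.  If one of them turns out to be a trap
	 * frame (its saved LR points at trapexit()), the argument is fetched
	 * from the register state saved at trap time instead.
	 */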
	for (i = 1; i <= aframes; i++) {
		fp = (uintptr_t *)*fp;

		/*
		 * On ppc32, AIM and Book-E, trapexit() is the immediately
		 * following label.  On ppc64 AIM, trapexit() follows a nop.
		 */
#ifdef __powerpc64__
		if ((long)(fp[2]) + 4 == (long)trapexit) {
#else
		if ((long)(fp[1]) == (long)trapexit) {
#endif
			/*
			 * In the case of powerpc, we will use the pointer to the regs
			 * structure that was pushed when we took the trap.  To get this
			 * structure, we must increment beyond the frame structure.  If the
			 * argument that we're seeking is passed on the stack, we'll pull
			 * the true stack pointer out of the saved registers and decrement
			 * our argument by the number of arguments passed in registers; if
			 * the argument we're seeking is passed in registers, we can just
			 * load it directly.
			 */
#ifdef __powerpc64__
			struct reg *rp = (struct reg *)((uintptr_t)fp[0] + 48);
#else
			struct reg *rp = (struct reg *)((uintptr_t)fp[0] + 8);
#endif

			if (arg <= inreg) {
				stack = &rp->fixreg[3];
			} else {
				stack = (uintptr_t *)(rp->fixreg[1]);
				arg -= inreg;
			}
			goto load;
		}
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
	stack = fp + 2;

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

int
dtrace_getstackdepth(int aframes)
{
	int depth = 0;
	uintptr_t osp, sp;
	vm_offset_t callpc;

	osp = PAGE_SIZE;
	aframes++;
	sp = dtrace_getfp();
	depth++;
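	/*
	 * The current frame has already been counted; walk the back chain,
	 * counting real frames and consuming the remaining artificial ones.
	 */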
	for (;;) {
		if (sp <= osp)
			break;

		if (!dtrace_sp_inkernel(sp, aframes))
			break;

		if (aframes == 0)
			depth++;
		else
			aframes--;
		osp = sp;
		sp = dtrace_next_sp(sp);
	}
	if (depth < aframes)
		return (0);

	return (depth);
}

ulong_t
dtrace_getreg(struct trapframe *rp, uint_t reg)
{
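	/*
	 * Register numbers 0-31 select the GPRs; higher numbers select the
	 * special registers saved in the trap frame.
	 */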
	if (reg < 32)
		return (rp->fixreg[reg]);

	switch (reg) {
	case 33:
		return (rp->lr);
	case 34:
		return (rp->cr);
	case 35:
		return (rp->xer);
	case 36:
		return (rp->ctr);
	case 37:
		return (rp->srr0);
	case 38:
		return (rp->srr1);
	case 39:
		return (rp->exc);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
}

static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);

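	/*
	 * The kernel buffer is supplied by DTrace itself, so only the user
	 * address range needs a runtime check.
	 */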
	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		if (copyin((const void *)uaddr, (void *)kaddr, size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size)) {
		if (copyout((const void *)kaddr, (void *)uaddr, size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
	}
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	size_t actual;
	int error;

	if (dtrace_copycheck(uaddr, kaddr, size)) {
		error = copyinstr((const void *)uaddr, (void *)kaddr,
		    size, &actual);

		/* ENAMETOOLONG is not a fault condition. */
		if (error && error != ENAMETOOLONG) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
	}
}

/*
 * The bulk of this function could be replaced to match dtrace_copyinstr()
 * if we ever implement a copyoutstr().
 */
void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	size_t len;

	if (dtrace_copycheck(uaddr, kaddr, size)) {
		len = strlen((const char *)kaddr);
		if (len > size)
			len = size;

		if (copyout((const void *)kaddr, (void *)uaddr, len)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
	}
}

uint8_t
dtrace_fuword8(void *uaddr)
{
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (fubyte(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	uint16_t ret = 0;

	if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
	}
	return (ret);
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (fuword32(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	uint64_t ret = 0;

	if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
	}
	return (ret);
}

uintptr_t
dtrace_fulword(void *uaddr)
{
	uintptr_t ret = 0;

	if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
	}
	return (ret);
}