/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: src/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c,v 1.10.2.1 2009/08/03 08:13:06 kensmith Exp $
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/* #pragma ident	"%Z%%M%	%I%	%E% SMI" */

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace). The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file. The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#if !defined(sun)
/* we need internal access to mutexes for state inspection */
#define __MUTEX_PRIVATE
#define __RWLOCK_PRIVATE
#endif

#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)

#include <sys/ctype.h>
#include <sys/limits.h>
//#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
//#include <sys/sx.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/dtrace_bsd.h>
#include <sys/vmem.h>
#include <sys/module.h>
#include <sys/cpu.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

#if !defined(sun)
/* fake module entry for NetBSD */
module_t *mod_nbsd = NULL;
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable. For example:
 *
 *	set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable. Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively. Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int dtrace_destructive_disallow = 0;
dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
size_t dtrace_global_maxsize = (16 * 1024);
size_t dtrace_actions_max = (16 * 1024);
size_t dtrace_retain_max = 1024;
dtrace_optval_t dtrace_helper_actions_max = 32;
dtrace_optval_t dtrace_helper_providers_max = 32;
dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t dtrace_strsize_default = 256;
dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
dtrace_optval_t dtrace_nspec_default = 1;
dtrace_optval_t dtrace_specsize_default = 32 * 1024;
dtrace_optval_t dtrace_stackframes_default = 20;
dtrace_optval_t dtrace_ustackframes_default = 20;
dtrace_optval_t dtrace_jstackframes_default = 50;
dtrace_optval_t dtrace_jstackstrsize_default = 512;
int dtrace_msgdsize_max = 128;
hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */
hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
int dtrace_devdepth_max = 32;
int dtrace_err_verbose;
hrtime_t dtrace_deadman_interval = NANOSEC;
hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax. One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory. While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char dtrace_zero[256] = { 0 }; /* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t *dtrace_devi;		/* device info */
#endif
static vmem_t *dtrace_arena;		/* probe ID arena */
#if defined(sun)
static vmem_t *dtrace_minor;		/* minor number arena */
static taskq_t *dtrace_taskq;		/* task queue */
#endif
static dtrace_probe_t **dtrace_probes;	/* array of all probes */
int dtrace_probes_size = 0;		/* size for kmem_free */
static int dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t *dtrace_meta_pid;	/* user-land meta provider */
static int dtrace_opens;		/* number of opens */
static int dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void *dtrace_softstate;		/* softstate pointer */
#endif
static dtrace_hash_t *dtrace_bymod;	/* probes hashed by module */
static dtrace_hash_t *dtrace_byfunc;	/* probes hashed by function */
static dtrace_hash_t *dtrace_byname;	/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int dtrace_toxranges;		/* number of toxic ranges */
static int dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t dtrace_anon;	/* anonymous enabling */
static kmem_cache_t *dtrace_state_cache;	/* cache for dynamic state */
static uint64_t dtrace_vtime_references;	/* number of vtimestamp refs */
static kthread_t *dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t *dtrace_ecb_create_cache;	/* cached created ECB */
static dtrace_genid_t dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
int dtrace_in_probe;			/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__)
uintptr_t dtrace_in_probe_addr;		/* Address of invop when already in probe */
#endif
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc. Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock. (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
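
/*
 * An illustrative sketch of the ordering rules above (not a path the
 * framework necessarily takes): a hypothetical operation that needed
 * every lock named above would have to acquire them as
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&mod_lock);
 *	mutex_enter(&dtrace_lock);
 *
 * and release them in the opposite order.
 */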
static kmutex_t dtrace_lock;		/* probe state lock */
static kmutex_t dtrace_provider_lock;	/* provider state lock */
static kmutex_t dtrace_meta_lock;	/* meta-provider state lock */

#if !defined(sun)
/* XXX FreeBSD hacks. */
static kmutex_t mod_lock;

#define cr_suid		cr_svuid
#define cr_sgid		cr_svgid
#define ipaddr_t	in_addr_t
#define mod_modname	pathname
#define vuprintf	vprintf
#define ttoproc(_a)	((_a)->l_proc)
#define crgetzoneid(_a)	0
//#define NCPU		MAXCPUS
#define NCPU		ncpu
#define SNOCD		0
#define CPU_ON_INTR(_a)	0

#define PRIV_EFFECTIVE		(1 << 0)
#define PRIV_DTRACE_KERNEL	(1 << 1)
#define PRIV_DTRACE_PROC	(1 << 2)
#define PRIV_DTRACE_USER	(1 << 3)
#define PRIV_PROC_OWNER		(1 << 4)
#define PRIV_PROC_ZONE		(1 << 5)
#define PRIV_ALL		~0

//SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define curcpu_id	CPU->cpu_id
#else
#define curcpu_id	cpu_number()
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static int
dtrace_enable_nullop(void)
{
	return (0);
}
static dtrace_pops_t dtrace_provider_ops = {
	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
#if defined(sun)
	(void (*)(void *, modctl_t *))dtrace_nullop,
#else
	(void (*)(void *, dtrace_modctl_t *))dtrace_nullop,
#endif
	(int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t dtrace_probeid_end;		/* special END probe */
dtrace_id_t dtrace_probeid_error;		/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char *dtrace_helptrace_buffer;
int dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int dtrace_helptrace_enabled = 1;
#else
int dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table. This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation. The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation. There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define DTRACE_AGGHASHSIZE_SLEW		17

#define DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier. This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables. To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables. That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	(where) = ((curthread->l_lid + (curthread->l_proc->p_pid << 16) + \
	    DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#if 0
#define DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu_id]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->l_lid + (curthread->l_proc->p_pid << 16) + \
	    DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif
#endif
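
/*
 * To make the layout concrete (illustrative values only): a Solaris
 * thread with t_did 42 running at base interrupt level gets the key
 * (42 + DIF_VARIABLE_MAX) with bits 61-63 clear, while the same thread
 * preempted by an interrupt above LOCK_LEVEL gets a distinct key with a
 * non-zero interrupt count in those top three bits.
 */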

#define DT_BSWAP_8(x)	((x) & 0xff)
#define DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
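
/*
 * For example, DT_BSWAP_16(0x1234) evaluates to 0x3412 and
 * DT_BSWAP_32(0x11223344) to 0x44332211: each macro swaps the two
 * halves of its operand, deferring to the next-smaller swap for each
 * half.
 */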

#define DT_MASK_LO 0x00000000FFFFFFFFULL

#define DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[curcpu_id].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}
#else
#define DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz. We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes. Ranges of size 0 are allowed.
 */
#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
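
/*
 * Note how the unsigned arithmetic above covers the underflow case: if
 * testaddr lies below baseaddr, (testaddr) - (baseaddr) wraps around to
 * a very large value and fails the < basesz comparison. The final
 * comparison catches (testaddr) + (testsz) itself wrapping past the top
 * of the address space.
 */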

/*
 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it. This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range. Allocations of size zero are allowed.
 */
#define DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu_id].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu_id].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define dtrace_loadptr	dtrace_load64
#else
#define dtrace_loadptr	dtrace_load32
#endif

#define DTRACE_DYNHASH_FREE	0
#define DTRACE_DYNHASH_SINK	1
#define DTRACE_DYNHASH_VALID	2

#define DTRACE_MATCH_FAIL	-1
#define DTRACE_MATCH_NEXT	0
#define DTRACE_MATCH_DONE	1
#define DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define DTRACE_STATE_ALIGN	64

#define DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
#if defined(sun)
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
#endif
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
#if defined(sun)
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
#endif
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context. Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
#ifdef __NetBSD__
	vpanic(format, alist);
#else
	dtrace_vpanic(format, alist);
#endif
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage. If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors. (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued. This includes the DTrace scratch areas, and any DTrace variable
 * region. The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable. This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state. For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables. These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}
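
/*
 * To restate checks (2) and (3) above concretely: every chunk in the
 * dynamic variable space begins with a dtrace_dynvar_t header, so a
 * store at a chunk offset below sizeof (dtrace_dynvar_t) would clobber
 * that header, and a store extending past dtds_chunksize would spill
 * into the header of the following chunk.
 */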

/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu_id].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu_id].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses. The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu_id].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu_id].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}
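
/*
 * The two unsigned comparisons above amount to an overlap test without
 * explicit range arithmetic: the first fires when kaddr lies inside the
 * toxic range, the second when the toxic base lies inside
 * [kaddr, kaddr + size), with underflow in either difference wrapping
 * around to a value too large to pass.
 */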

/*
 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
 * memory specified by the DIF program. The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace. As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered. The src is assumed to
 * be unsafe memory specified by the DIF program. The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type. The src is assumed to be unsafe memory specified by the DIF
 * program. The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
 * unsafe memory specified by the DIF program. The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu_id].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop. Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}
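
/*
 * The carry out of the low word is detected by the wrap-around above:
 * e.g. adding 1 to a low word of 0xffffffffffffffff yields 0, which is
 * less than both addends, so a 1 is propagated into the high word.
 */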

/*
 * Shift the 128-bit value in a by b. If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}
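
/*
 * For instance, dtrace_shift_128(a, 32) moves the top 32 bits of a[0]
 * into the bottom of a[1]; dtrace_multiply_128() below relies on this
 * to place its 64-bit partial products at their proper 2^32 positions.
 */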

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
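
/*
 * A quick sanity check of the decomposition with small factors:
 * multiplying 0x100000000 by itself gives hi1 = hi2 = 1 and
 * lo1 = lo2 = 0, so product[0] == 0 and product[1] == 1 -- exactly
 * 2^64, as expected for 2^32 * 2^32.
 */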

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

#if defined(sun)
	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);
#else
	if ((cr = CRED()) != NULL) {
		uid_t uid;
		gid_t gid;

		uid = kauth_cred_getuid(s_cr);
		gid = kauth_cred_getgid(s_cr);

		if (uid == kauth_cred_getuid(cr) &&
		    uid == kauth_cred_geteuid(cr) &&
		    uid == kauth_cred_getsvuid(cr) &&
		    gid == kauth_cred_getgid(cr) &&
		    gid == kauth_cred_getegid(cr) &&
		    gid == kauth_cred_getsvgid(cr)) {
			return 1;
		}
	}
#endif

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Note: not called from probe context. This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar(): if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated. If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu_id, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm. For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
	 * bit, and seems to have only a minute effect on distribution. For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte. It's painful to do this, but it's much
	 * better than pathological hash distribution. The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

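	/*
	 * Finalize the hash: these last three mixing steps are the tail
	 * of Jenkins' "One-at-a-time" algorithm noted above, spreading
	 * the accumulated key bits across the whole word before it is
	 * reduced modulo the hash size below.
	 */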
	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values. If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here. If the cycle count becomes
	 * important here, tricks can be pulled to reduce it. (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.) It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}
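
	/*
	 * Note the locking protocol above: the low bit of dtdh_lock is the
	 * lock proper -- waiters spin while the word is odd, and the winning
	 * compare-and-swap bumps the even word to an odd value. The unlock
	 * paths below increment it back to even, so the word also serves as
	 * a version that readers sample before and after a traversal to
	 * detect a concurrent deallocation.
	 */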

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails: somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted. Note that we could also
				 * detect this by simply letting this loop run
				 * to completion, as we would eventually hit
				 * the end of the dirty list. However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker. In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next; /* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}
1675
1676 if (dvar == NULL) {
1677 /*
1678 * If dvar is NULL, it is because we went off the rails:
1679 * one of the elements that we traversed in the hash chain
1680 * was deleted while we were traversing it. In this case,
1681 * we assert that we aren't doing a dealloc (deallocs lock
1682 * the hash bucket to prevent themselves from racing with
1683 * one another), and retry the hash chain traversal.
1684 */
1685 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1686 goto top;
1687 }
1688
1689 if (op != DTRACE_DYNVAR_ALLOC) {
1690 /*
1691 * If we are not to allocate a new variable, we want to
1692 * return NULL now. Before we return, check that the value
1693 * of the lock word hasn't changed. If it has, we may have
1694 * seen an inconsistent snapshot.
1695 */
1696 if (op == DTRACE_DYNVAR_NOALLOC) {
1697 if (hash[bucket].dtdh_lock != lock)
1698 goto top;
1699 } else {
1700 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1701 ASSERT(hash[bucket].dtdh_lock == lock);
1702 ASSERT(lock & 1);
1703 hash[bucket].dtdh_lock++;
1704 }
1705
1706 return (NULL);
1707 }
1708
1709 /*
1710 * We need to allocate a new dynamic variable. The size we need is the
1711 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1712 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1713 * the size of any referred-to data (dsize). We then round the final
1714 * size up to the chunksize for allocation.
1715 */
1716 for (ksize = 0, i = 0; i < nkeys; i++)
1717 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1718
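	/*
	 * For example (hypothetical sizes): for a two-key tuple consisting
	 * of a 13-byte string key and an integer key (dttk_size == 0),
	 * ksize is 16 -- the string rounds up to 8-byte alignment and the
	 * integer key contributes no auxiliary data -- so the total checked
	 * below is sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) + 16 +
	 * dsize.
	 */
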
1719 /*
1720 * This should be pretty much impossible, but could happen if, say,
1721 * strange DIF specified the tuple. Ideally, this should be an
1722 * assertion and not an error condition -- but that requires that the
1723 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1724 * bullet-proof. (That is, it must not be able to be fooled by
1725 * malicious DIF.) Given the lack of backwards branches in DIF,
1726 * solving this would presumably not amount to solving the Halting
1727 * Problem -- but it still seems awfully hard.
1728 */
1729 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1730 ksize + dsize > chunksize) {
1731 dcpu->dtdsc_drops++;
1732 return (NULL);
1733 }
1734
1735 nstate = DTRACE_DSTATE_EMPTY;
1736
1737 do {
1738 retry:
1739 free = dcpu->dtdsc_free;
1740
1741 if (free == NULL) {
1742 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1743 void *rval;
1744
1745 if (clean == NULL) {
1746 /*
1747 * We're out of dynamic variable space on
1748 * this CPU. Unless we have tried all CPUs,
1749 * we'll try to allocate from a different
1750 * CPU.
1751 */
1752 switch (dstate->dtds_state) {
1753 case DTRACE_DSTATE_CLEAN: {
1754 void *sp = &dstate->dtds_state;
1755
1756 if (++cpu >= NCPU)
1757 cpu = 0;
1758
1759 if (dcpu->dtdsc_dirty != NULL &&
1760 nstate == DTRACE_DSTATE_EMPTY)
1761 nstate = DTRACE_DSTATE_DIRTY;
1762
1763 if (dcpu->dtdsc_rinsing != NULL)
1764 nstate = DTRACE_DSTATE_RINSING;
1765
1766 dcpu = &dstate->dtds_percpu[cpu];
1767
1768 if (cpu != me)
1769 goto retry;
1770
1771 (void) dtrace_cas32(sp,
1772 DTRACE_DSTATE_CLEAN, nstate);
1773
1774 /*
1775 * To increment the correct bean
1776 * counter, take another lap.
1777 */
1778 goto retry;
1779 }
1780
1781 case DTRACE_DSTATE_DIRTY:
1782 dcpu->dtdsc_dirty_drops++;
1783 break;
1784
1785 case DTRACE_DSTATE_RINSING:
1786 dcpu->dtdsc_rinsing_drops++;
1787 break;
1788
1789 case DTRACE_DSTATE_EMPTY:
1790 dcpu->dtdsc_drops++;
1791 break;
1792 }
1793
1794 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1795 return (NULL);
1796 }
1797
1798 /*
1799 * The clean list appears to be non-empty. We want to
1800 * move the clean list to the free list; we start by
1801 * moving the clean pointer aside.
1802 */
1803 if (dtrace_casptr(&dcpu->dtdsc_clean,
1804 clean, NULL) != clean) {
1805 /*
1806 * We are in one of two situations:
1807 *
1808 * (a) The clean list was switched to the
1809 * free list by another CPU.
1810 *
1811 * (b) The clean list was added to by the
1812 * cleansing cyclic.
1813 *
1814 * In either of these situations, we can
1815 * just reattempt the free list allocation.
1816 */
1817 goto retry;
1818 }
1819
1820 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1821
1822 /*
1823 * Now we'll move the clean list to the free list.
1824 * It's impossible for this to fail: the only way
1825 * the free list can be updated is through this
1826 * code path, and only one CPU can own the clean list.
1827 * Thus, it would only be possible for this to fail if
1828 * this code were racing with dtrace_dynvar_clean().
1829 * (That is, if dtrace_dynvar_clean() updated the clean
1830 * list, and we ended up racing to update the free
1831 * list.) This race is prevented by the dtrace_sync()
1832 * in dtrace_dynvar_clean() -- which flushes the
1833 * owners of the clean lists out before resetting
1834 * the clean lists.
1835 */
1836 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1837 ASSERT(rval == NULL);
1838 goto retry;
1839 }
1840
1841 dvar = free;
1842 new_free = dvar->dtdv_next;
1843 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1844
1845 /*
1846 * We have now allocated a new chunk. We copy the tuple keys into the
1847 * tuple array and copy any referenced key data into the data space
1848 * following the tuple array. As we do this, we relocate dttk_value
1849 * in the final tuple to point to the key data address in the chunk.
1850 */
1851 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1852 dvar->dtdv_data = (void *)(kdata + ksize);
1853 dvar->dtdv_tuple.dtt_nkeys = nkeys;
1854
1855 for (i = 0; i < nkeys; i++) {
1856 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1857 size_t kesize = key[i].dttk_size;
1858
1859 if (kesize != 0) {
1860 dtrace_bcopy(
1861 (const void *)(uintptr_t)key[i].dttk_value,
1862 (void *)kdata, kesize);
1863 dkey->dttk_value = kdata;
1864 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1865 } else {
1866 dkey->dttk_value = key[i].dttk_value;
1867 }
1868
1869 dkey->dttk_size = kesize;
1870 }
1871
1872 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1873 dvar->dtdv_hashval = hashval;
1874 dvar->dtdv_next = start;
1875
1876 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1877 return (dvar);
1878
1879 /*
1880 * The cas has failed. Either another CPU is adding an element to
1881 * this hash chain, or another CPU is deleting an element from this
1882 * hash chain. The simplest way to deal with both of these cases
1883 * (though not necessarily the most efficient) is to free our
1884 * allocated block and tail-call ourselves. Note that the free is
1885 * to the dirty list and _not_ to the free list. This is to prevent
1886 * races with allocators, above.
1887 */
1888 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1889
1890 dtrace_membar_producer();
1891
1892 do {
1893 free = dcpu->dtdsc_dirty;
1894 dvar->dtdv_next = free;
1895 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1896
1897 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
1898 }
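
#if 0
/*
 * Purely illustrative (and deliberately uncompiled) sketch of the lock-free
 * list discipline used throughout dtrace_dynvar() above: a CAS-based push
 * onto, and pop from, a singly-linked list head, as done for the free and
 * dirty lists.  Here "casptr" stands in for dtrace_casptr(), which stores
 * "new" into *target iff *target equals "cmp", returning the value it
 * witnessed in *target.  Note that this simple discipline is only safe
 * against ABA problems because of the dirty/rinsing/clean staging performed
 * by the surrounding code.
 */
typedef struct node {
	struct node *next;
} node_t;

static void
push(node_t **head, node_t *n)
{
	node_t *old;

	do {
		old = *head;
		n->next = old;
	} while (casptr(head, old, n) != old);
}

static node_t *
pop(node_t **head)
{
	node_t *old;

	do {
		if ((old = *head) == NULL)
			return (NULL);
	} while (casptr(head, old, old->next) != old);

	return (old);
}
#endif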
1899
1900 /*ARGSUSED*/
1901 static void
1902 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1903 {
1904 if ((int64_t)nval < (int64_t)*oval)
1905 *oval = nval;
1906 }
1907
1908 /*ARGSUSED*/
1909 static void
1910 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1911 {
1912 if ((int64_t)nval > (int64_t)*oval)
1913 *oval = nval;
1914 }
1915
1916 static void
1917 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1918 {
1919 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1920 int64_t val = (int64_t)nval;
1921
1922 if (val < 0) {
1923 for (i = 0; i < zero; i++) {
1924 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1925 quanta[i] += incr;
1926 return;
1927 }
1928 }
1929 } else {
1930 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
1931 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1932 quanta[i - 1] += incr;
1933 return;
1934 }
1935 }
1936
1937 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1938 return;
1939 }
1940
1941 ASSERT(0);
1942 }
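
#if 0
/*
 * Uncompiled sketch of the power-of-two bucketing performed above via
 * DTRACE_QUANTIZE_BUCKETVAL(), restricted to non-negative values
 * (hypothetical helper, for illustration only): a value val > 0 lands in
 * the bucket covering [2^(k-1), 2^k), and zero lands in the zero bucket.
 */
static int
quantize_bucket(uint64_t val)
{
	int k = 0;

	while (k < 64 && val >= (1ULL << k))
		k++;

	return (k);	/* e.g. val == 7 gives k == 3: the [4, 8) bucket */
}
#endif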
1943
1944 static void
1945 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1946 {
1947 uint64_t arg = *lquanta++;
1948 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1949 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1950 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1951 int32_t val = (int32_t)nval, level;
1952
1953 ASSERT(step != 0);
1954 ASSERT(levels != 0);
1955
1956 if (val < base) {
1957 /*
1958 * This is an underflow.
1959 */
1960 lquanta[0] += incr;
1961 return;
1962 }
1963
1964 level = (val - base) / step;
1965
1966 if (level < levels) {
1967 lquanta[level + 1] += incr;
1968 return;
1969 }
1970
1971 /*
1972 * This is an overflow.
1973 */
1974 lquanta[levels + 1] += incr;
1975 }
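
/*
 * For illustration, with the (hypothetical) parameters base = 0, step = 10
 * and levels = 5, the layout established above is:
 *
 *	lquanta[0]	underflow: val < 0
 *	lquanta[k + 1]	linear bucket k: k * 10 <= val < (k + 1) * 10
 *	lquanta[6]	overflow: val >= 50
 *
 * so val = 37 computes level = 3 and increments lquanta[4].
 */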
1976
1977 /*ARGSUSED*/
1978 static void
1979 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1980 {
1981 data[0]++;
1982 data[1] += nval;
1983 }
1984
1985 /*ARGSUSED*/
1986 static void
1987 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
1988 {
1989 int64_t snval = (int64_t)nval;
1990 uint64_t tmp[2];
1991
1992 data[0]++;
1993 data[1] += nval;
1994
1995 /*
1996 * What we want to say here is:
1997 *
1998 * data[2] += nval * nval;
1999 *
2000 * But given that nval is 64-bit, we could easily overflow, so
2001 * we do this as 128-bit arithmetic.
2002 */
2003 if (snval < 0)
2004 snval = -snval;
2005
2006 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2007 dtrace_add_128(data + 2, tmp, data + 2);
2008 }
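
/*
 * From the three quantities accumulated above -- the count in data[0], the
 * sum of the samples in data[1] and the 128-bit sum of their squares in
 * data[2] and data[3] -- the consumer can later recover the standard
 * deviation as sqrt(E[x^2] - E[x]^2), that is:
 *
 *	sqrt(data[2..3] / data[0] - (data[1] / data[0])^2)
 */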
2009
2010 /*ARGSUSED*/
2011 static void
2012 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2013 {
2014 *oval = *oval + 1;
2015 }
2016
2017 /*ARGSUSED*/
2018 static void
2019 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2020 {
2021 *oval += nval;
2022 }
2023
2024 /*
2025 * Aggregate given the tuple in the principal data buffer, and the aggregating
2026 * action denoted by the specified dtrace_aggregation_t. The aggregation
2027 * buffer is specified as the buf parameter. This routine does not return
2028 * failure; if there is no space in the aggregation buffer, the data will be
2029 * dropped, and a corresponding counter incremented.
2030 */
2031 static void
2032 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2033 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2034 {
2035 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2036 uint32_t i, ndx, size, fsize;
2037 uint32_t align = sizeof (uint64_t) - 1;
2038 dtrace_aggbuffer_t *agb;
2039 dtrace_aggkey_t *key;
2040 uint32_t hashval = 0, limit, isstr;
2041 caddr_t tomax, data, kdata;
2042 dtrace_actkind_t action;
2043 dtrace_action_t *act;
2044 uintptr_t offs;
2045
2046 if (buf == NULL)
2047 return;
2048
2049 if (!agg->dtag_hasarg) {
2050 /*
2051 * Currently, only quantize() and lquantize() take additional
2052 * arguments, and they have the same semantics: an increment
2053 * value that defaults to 1 when not present. If additional
2054 * aggregating actions take arguments, the setting of the
2055 * default argument value will presumably have to become more
2056 * sophisticated...
2057 */
2058 arg = 1;
2059 }
2060
2061 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2062 size = rec->dtrd_offset - agg->dtag_base;
2063 fsize = size + rec->dtrd_size;
2064
2065 ASSERT(dbuf->dtb_tomax != NULL);
2066 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2067
2068 if ((tomax = buf->dtb_tomax) == NULL) {
2069 dtrace_buffer_drop(buf);
2070 return;
2071 }
2072
2073 /*
2074 * The metastructure is always at the bottom of the buffer.
2075 */
2076 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2077 sizeof (dtrace_aggbuffer_t));
2078
2079 if (buf->dtb_offset == 0) {
2080 /*
2081 * We just kludge up approximately 1/8th of the size to be
2082 * buckets. If this guess ends up being routinely
2083 * off-the-mark, we may need to dynamically readjust this
2084 * based on past performance.
2085 */
2086 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2087
2088 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2089 (uintptr_t)tomax || hashsize == 0) {
2090 /*
2091 * We've been given a ludicrously small buffer;
2092 * increment our drop count and leave.
2093 */
2094 dtrace_buffer_drop(buf);
2095 return;
2096 }
2097
2098 /*
2099 * And now, a pathetic attempt to get an odd (or, perchance,
2100 * a prime) hash size for better hash distribution.
2101 */
2102 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2103 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2104
2105 agb->dtagb_hashsize = hashsize;
2106 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2107 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2108 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2109
2110 for (i = 0; i < agb->dtagb_hashsize; i++)
2111 agb->dtagb_hash[i] = NULL;
2112 }
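
	/*
	 * The buffer is now laid out roughly as follows, with key data
	 * growing up from the bottom and dtrace_aggkey_t structures carved
	 * downward from dtagb_free:
	 *
	 *	tomax                                      tomax + dtb_size
	 *	+- key data ->  ...  <- aggkeys -+- hash pointers -+- agb -+
	 *	|  (dtb_offset)     (dtagb_free) |  (dtagb_hash)   |       |
	 *	+--------------------------------+-----------------+-------+
	 */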
2113
2114 ASSERT(agg->dtag_first != NULL);
2115 ASSERT(agg->dtag_first->dta_intuple);
2116
2117 /*
2118 * Calculate the hash value based on the key. Note that we _don't_
2119 * include the aggid in the hashing (but we will store it as part of
2120 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2121 * algorithm: a simple, quick algorithm that has no known funnels, and
2122 * gets good distribution in practice. The efficacy of the hashing
2123 * algorithm (and a comparison with other algorithms) may be found by
2124 * running the ::dtrace_aggstat MDB dcmd.
2125 */
2126 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2127 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2128 limit = i + act->dta_rec.dtrd_size;
2129 ASSERT(limit <= size);
2130 isstr = DTRACEACT_ISSTRING(act);
2131
2132 for (; i < limit; i++) {
2133 hashval += data[i];
2134 hashval += (hashval << 10);
2135 hashval ^= (hashval >> 6);
2136
2137 if (isstr && data[i] == '\0')
2138 break;
2139 }
2140 }
2141
2142 hashval += (hashval << 3);
2143 hashval ^= (hashval >> 11);
2144 hashval += (hashval << 15);
2145
2146 /*
2147 * Yes, the divide here is expensive -- but it's generally the least
2148 * of the performance issues given the amount of data that we iterate
2149 * over to compute hash values, compare data, etc.
2150 */
2151 ndx = hashval % agb->dtagb_hashsize;
2152
2153 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2154 ASSERT((caddr_t)key >= tomax);
2155 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2156
2157 if (hashval != key->dtak_hashval || key->dtak_size != size)
2158 continue;
2159
2160 kdata = key->dtak_data;
2161 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2162
2163 for (act = agg->dtag_first; act->dta_intuple;
2164 act = act->dta_next) {
2165 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2166 limit = i + act->dta_rec.dtrd_size;
2167 ASSERT(limit <= size);
2168 isstr = DTRACEACT_ISSTRING(act);
2169
2170 for (; i < limit; i++) {
2171 if (kdata[i] != data[i])
2172 goto next;
2173
2174 if (isstr && data[i] == '\0')
2175 break;
2176 }
2177 }
2178
2179 if (action != key->dtak_action) {
2180 /*
2181 * We are aggregating on the same value in the same
2182 * aggregation with two different aggregating actions.
2183 * (This should have been picked up in the compiler,
2184 * so we may be dealing with errant or devious DIF.)
2185 * This is an error condition; we indicate as much,
2186 * and return.
2187 */
2188 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2189 return;
2190 }
2191
2192 /*
2193 * This is a hit: we need to apply the aggregator to
2194 * the value at this key.
2195 */
2196 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2197 return;
2198 next:
2199 continue;
2200 }
2201
2202 /*
2203 * We didn't find it. We need to allocate some zero-filled space,
2204 * link it into the hash table appropriately, and apply the aggregator
2205 * to the (zero-filled) value.
2206 */
2207 offs = buf->dtb_offset;
2208 while (offs & (align - 1))
2209 offs += sizeof (uint32_t);
2210
2211 /*
2212 * If we don't have enough room to both allocate a new key _and_
2213 * its associated data, increment the drop count and return.
2214 */
2215 if ((uintptr_t)tomax + offs + fsize >
2216 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2217 dtrace_buffer_drop(buf);
2218 return;
2219 }
2220
2221 /*CONSTCOND*/
2222 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2223 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2224 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2225
2226 key->dtak_data = kdata = tomax + offs;
2227 buf->dtb_offset = offs + fsize;
2228
2229 /*
2230 * Now copy the data across.
2231 */
2232 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2233
2234 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2235 kdata[i] = data[i];
2236
2237 /*
2238 * Because strings are not zeroed out by default, we need to iterate
2239 * looking for actions that store strings, and we need to explicitly
2240 * pad these strings out with zeroes.
2241 */
2242 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2243 int nul;
2244
2245 if (!DTRACEACT_ISSTRING(act))
2246 continue;
2247
2248 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2249 limit = i + act->dta_rec.dtrd_size;
2250 ASSERT(limit <= size);
2251
2252 for (nul = 0; i < limit; i++) {
2253 if (nul) {
2254 kdata[i] = '\0';
2255 continue;
2256 }
2257
2258 if (data[i] != '\0')
2259 continue;
2260
2261 nul = 1;
2262 }
2263 }
2264
2265 for (i = size; i < fsize; i++)
2266 kdata[i] = 0;
2267
2268 key->dtak_hashval = hashval;
2269 key->dtak_size = size;
2270 key->dtak_action = action;
2271 key->dtak_next = agb->dtagb_hash[ndx];
2272 agb->dtagb_hash[ndx] = key;
2273
2274 /*
2275 * Finally, apply the aggregator.
2276 */
2277 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2278 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2279 }
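
#if 0
/*
 * Purely illustrative, uncompiled sketch of the Jenkins "one-at-a-time"
 * hash that dtrace_aggregate() computes inline above, minus the string
 * termination and multi-record handling:
 */
static uint32_t
jenkins_one_at_a_time(const unsigned char *data, size_t len)
{
	uint32_t hashval = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		hashval += data[i];
		hashval += (hashval << 10);
		hashval ^= (hashval >> 6);
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	return (hashval);
}
#endif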
2280
2281 /*
2282 * Given consumer state, this routine finds a speculation in the INACTIVE
2283 * state and transitions it into the ACTIVE state. If there is no speculation
2284 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2285 * incremented -- it is up to the caller to take appropriate action.
2286 */
2287 static int
2288 dtrace_speculation(dtrace_state_t *state)
2289 {
2290 int i = 0;
2291 dtrace_speculation_state_t current;
2292 uint32_t *stat = &state->dts_speculations_unavail, count;
2293
2294 while (i < state->dts_nspeculations) {
2295 dtrace_speculation_t *spec = &state->dts_speculations[i];
2296
2297 current = spec->dtsp_state;
2298
2299 if (current != DTRACESPEC_INACTIVE) {
2300 if (current == DTRACESPEC_COMMITTINGMANY ||
2301 current == DTRACESPEC_COMMITTING ||
2302 current == DTRACESPEC_DISCARDING)
2303 stat = &state->dts_speculations_busy;
2304 i++;
2305 continue;
2306 }
2307
2308 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2309 current, DTRACESPEC_ACTIVE) == current)
2310 return (i + 1);
2311 }
2312
2313 /*
2314 * We couldn't find a speculation. If we found as much as a single
2315 * busy speculation buffer, we'll attribute this failure as "busy"
2316 * instead of "unavail".
2317 */
2318 do {
2319 count = *stat;
2320 } while (dtrace_cas32(stat, count, count + 1) != count);
2321
2322 return (0);
2323 }
2324
2325 /*
2326 * This routine commits an active speculation. If the specified speculation
2327 * is not in a valid state to perform a commit(), this routine will silently do
2328 * nothing. The state of the specified speculation is transitioned according
2329 * to the state transition diagram outlined in <sys/dtrace_impl.h>
2330 */
2331 static void
2332 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2333 dtrace_specid_t which)
2334 {
2335 dtrace_speculation_t *spec;
2336 dtrace_buffer_t *src, *dest;
2337 uintptr_t daddr, saddr, dlimit;
2338 dtrace_speculation_state_t current, new = 0;
2339 intptr_t offs;
2340
2341 if (which == 0)
2342 return;
2343
2344 if (which > state->dts_nspeculations) {
2345 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2346 return;
2347 }
2348
2349 spec = &state->dts_speculations[which - 1];
2350 src = &spec->dtsp_buffer[cpu];
2351 dest = &state->dts_buffer[cpu];
2352
2353 do {
2354 current = spec->dtsp_state;
2355
2356 if (current == DTRACESPEC_COMMITTINGMANY)
2357 break;
2358
2359 switch (current) {
2360 case DTRACESPEC_INACTIVE:
2361 case DTRACESPEC_DISCARDING:
2362 return;
2363
2364 case DTRACESPEC_COMMITTING:
2365 /*
2366 * This is only possible if we are (a) commit()'ing
2367 * without having done a prior speculate() on this CPU
2368 * and (b) racing with another commit() on a different
2369 * CPU. There's nothing to do -- we just assert that
2370 * our offset is 0.
2371 */
2372 ASSERT(src->dtb_offset == 0);
2373 return;
2374
2375 case DTRACESPEC_ACTIVE:
2376 new = DTRACESPEC_COMMITTING;
2377 break;
2378
2379 case DTRACESPEC_ACTIVEONE:
2380 /*
2381 * This speculation is active on one CPU. If our
2382 * buffer offset is non-zero, we know that the one CPU
2383 * must be us. Otherwise, we are committing on a
2384 * different CPU from the speculate(), and we must
2385 * rely on being asynchronously cleaned.
2386 */
2387 if (src->dtb_offset != 0) {
2388 new = DTRACESPEC_COMMITTING;
2389 break;
2390 }
2391 /*FALLTHROUGH*/
2392
2393 case DTRACESPEC_ACTIVEMANY:
2394 new = DTRACESPEC_COMMITTINGMANY;
2395 break;
2396
2397 default:
2398 ASSERT(0);
2399 }
2400 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2401 current, new) != current);
2402
2403 /*
2404 * We have set the state to indicate that we are committing this
2405 * speculation. Now reserve the necessary space in the destination
2406 * buffer.
2407 */
2408 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2409 sizeof (uint64_t), state, NULL)) < 0) {
2410 dtrace_buffer_drop(dest);
2411 goto out;
2412 }
2413
2414 /*
2415 * We have the space; copy the buffer across. (Note that this is a
2416 * highly suboptimal bcopy(); in the unlikely event that this becomes
2417 * a serious performance issue, a high-performance DTrace-specific
2418 * bcopy() should obviously be invented.)
2419 */
2420 daddr = (uintptr_t)dest->dtb_tomax + offs;
2421 dlimit = daddr + src->dtb_offset;
2422 saddr = (uintptr_t)src->dtb_tomax;
2423
2424 /*
2425 * First, the aligned portion.
2426 */
2427 while (dlimit - daddr >= sizeof (uint64_t)) {
2428 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2429
2430 daddr += sizeof (uint64_t);
2431 saddr += sizeof (uint64_t);
2432 }
2433
2434 /*
2435 * Now any left-over bit...
2436 */
2437 while (dlimit - daddr)
2438 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2439
2440 /*
2441 * Finally, commit the reserved space in the destination buffer.
2442 */
2443 dest->dtb_offset = offs + src->dtb_offset;
2444
2445 out:
2446 /*
2447 * If we're lucky enough to be the only active CPU on this speculation
2448 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2449 */
2450 if (current == DTRACESPEC_ACTIVE ||
2451 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2452 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2453 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2454
2455 ASSERT(rval == DTRACESPEC_COMMITTING);
2456 }
2457
2458 src->dtb_offset = 0;
2459 src->dtb_xamot_drops += src->dtb_drops;
2460 src->dtb_drops = 0;
2461 }
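
/*
 * To summarize the commit path above (the full state transition diagram
 * lives in <sys/dtrace_impl.h>):
 *
 *	ACTIVE		-> COMMITTING		(then -> INACTIVE, above)
 *	ACTIVEONE	-> COMMITTING		(our buffer holds the data)
 *	ACTIVEONE	-> COMMITTINGMANY	(data is on another CPU)
 *	ACTIVEMANY	-> COMMITTINGMANY	(cleaned asynchronously)
 */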
2462
2463 /*
2464 * This routine discards an active speculation. If the specified speculation
2465 * is not in a valid state to perform a discard(), this routine will silently
2466 * do nothing. The state of the specified speculation is transitioned
2467 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
2468 */
2469 static void
2470 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2471 dtrace_specid_t which)
2472 {
2473 dtrace_speculation_t *spec;
2474 dtrace_speculation_state_t current, new = 0;
2475 dtrace_buffer_t *buf;
2476
2477 if (which == 0)
2478 return;
2479
2480 if (which > state->dts_nspeculations) {
2481 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2482 return;
2483 }
2484
2485 spec = &state->dts_speculations[which - 1];
2486 buf = &spec->dtsp_buffer[cpu];
2487
2488 do {
2489 current = spec->dtsp_state;
2490
2491 switch (current) {
2492 case DTRACESPEC_INACTIVE:
2493 case DTRACESPEC_COMMITTINGMANY:
2494 case DTRACESPEC_COMMITTING:
2495 case DTRACESPEC_DISCARDING:
2496 return;
2497
2498 case DTRACESPEC_ACTIVE:
2499 case DTRACESPEC_ACTIVEMANY:
2500 new = DTRACESPEC_DISCARDING;
2501 break;
2502
2503 case DTRACESPEC_ACTIVEONE:
2504 if (buf->dtb_offset != 0) {
2505 new = DTRACESPEC_INACTIVE;
2506 } else {
2507 new = DTRACESPEC_DISCARDING;
2508 }
2509 break;
2510
2511 default:
2512 ASSERT(0);
2513 }
2514 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2515 current, new) != current);
2516
2517 buf->dtb_offset = 0;
2518 buf->dtb_drops = 0;
2519 }
2520
2521 /*
2522 * Note: not called from probe context. This function is called
2523 * asynchronously from cross call context to clean any speculations that are
2524 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2525 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2526 * speculation.
2527 */
2528 static void
2529 dtrace_speculation_clean_here(dtrace_state_t *state)
2530 {
2531 dtrace_icookie_t cookie;
2532 processorid_t cpu = curcpu_id;
2533 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2534 dtrace_specid_t i;
2535
2536 cookie = dtrace_interrupt_disable();
2537
2538 if (dest->dtb_tomax == NULL) {
2539 dtrace_interrupt_enable(cookie);
2540 return;
2541 }
2542
2543 for (i = 0; i < state->dts_nspeculations; i++) {
2544 dtrace_speculation_t *spec = &state->dts_speculations[i];
2545 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2546
2547 if (src->dtb_tomax == NULL)
2548 continue;
2549
2550 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2551 src->dtb_offset = 0;
2552 continue;
2553 }
2554
2555 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2556 continue;
2557
2558 if (src->dtb_offset == 0)
2559 continue;
2560
2561 dtrace_speculation_commit(state, cpu, i + 1);
2562 }
2563
2564 dtrace_interrupt_enable(cookie);
2565 }
2566
2567 /*
2568 * Note: not called from probe context. This function is called
2569 * asynchronously (and at a regular interval) to clean any speculations that
2570 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2571 * is work to be done, it cross calls all CPUs to perform that work;
2572 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2573 * INACTIVE state until they have been cleaned by all CPUs.
2574 */
2575 static void
2576 dtrace_speculation_clean(dtrace_state_t *state)
2577 {
2578 int work = 0, rv;
2579 dtrace_specid_t i;
2580
2581 for (i = 0; i < state->dts_nspeculations; i++) {
2582 dtrace_speculation_t *spec = &state->dts_speculations[i];
2583
2584 ASSERT(!spec->dtsp_cleaning);
2585
2586 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2587 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2588 continue;
2589
2590 work++;
2591 spec->dtsp_cleaning = 1;
2592 }
2593
2594 if (!work)
2595 return;
2596
2597 dtrace_xcall(DTRACE_CPUALL,
2598 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2599
2600 /*
2601 * We now know that all CPUs have committed or discarded their
2602 * speculation buffers, as appropriate. We can now set the state
2603 * to inactive.
2604 */
2605 for (i = 0; i < state->dts_nspeculations; i++) {
2606 dtrace_speculation_t *spec = &state->dts_speculations[i];
2607 dtrace_speculation_state_t current, new;
2608
2609 if (!spec->dtsp_cleaning)
2610 continue;
2611
2612 current = spec->dtsp_state;
2613 ASSERT(current == DTRACESPEC_DISCARDING ||
2614 current == DTRACESPEC_COMMITTINGMANY);
2615
2616 new = DTRACESPEC_INACTIVE;
2617
2618 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2619 ASSERT(rv == current);
2620 spec->dtsp_cleaning = 0;
2621 }
2622 }
2623
2624 /*
2625 * Called as part of a speculate() to get the speculative buffer associated
2626 * with a given speculation. Returns NULL if the specified speculation is not
2627 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2628 * the active CPU is not the specified CPU -- the speculation will be
2629 * atomically transitioned into the ACTIVEMANY state.
2630 */
2631 static dtrace_buffer_t *
2632 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2633 dtrace_specid_t which)
2634 {
2635 dtrace_speculation_t *spec;
2636 dtrace_speculation_state_t current, new = 0;
2637 dtrace_buffer_t *buf;
2638
2639 if (which == 0)
2640 return (NULL);
2641
2642 if (which > state->dts_nspeculations) {
2643 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2644 return (NULL);
2645 }
2646
2647 spec = &state->dts_speculations[which - 1];
2648 buf = &spec->dtsp_buffer[cpuid];
2649
2650 do {
2651 current = spec->dtsp_state;
2652
2653 switch (current) {
2654 case DTRACESPEC_INACTIVE:
2655 case DTRACESPEC_COMMITTINGMANY:
2656 case DTRACESPEC_DISCARDING:
2657 return (NULL);
2658
2659 case DTRACESPEC_COMMITTING:
2660 ASSERT(buf->dtb_offset == 0);
2661 return (NULL);
2662
2663 case DTRACESPEC_ACTIVEONE:
2664 /*
2665 * This speculation is currently active on one CPU.
2666 * Check the offset in the buffer; if it's non-zero,
2667 * that CPU must be us (and we leave the state alone).
2668 * If it's zero, assume that we're starting on a new
2669 * CPU -- and change the state to indicate that the
2670 * speculation is active on more than one CPU.
2671 */
2672 if (buf->dtb_offset != 0)
2673 return (buf);
2674
2675 new = DTRACESPEC_ACTIVEMANY;
2676 break;
2677
2678 case DTRACESPEC_ACTIVEMANY:
2679 return (buf);
2680
2681 case DTRACESPEC_ACTIVE:
2682 new = DTRACESPEC_ACTIVEONE;
2683 break;
2684
2685 default:
2686 ASSERT(0);
2687 }
2688 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2689 current, new) != current);
2690
2691 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2692 return (buf);
2693 }
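
/*
 * The speculate-path transitions effected above are thus ACTIVE ->
 * ACTIVEONE on the first speculate(), and ACTIVEONE -> ACTIVEMANY when a
 * second CPU joins; an ACTIVEONE buffer with a non-zero offset must
 * already belong to us.
 */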
2694
2695 /*
2696 * Return a string. In the event that the user lacks the privilege to access
2697 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2698 * don't fail access checking.
2699 *
2700 * dtrace_dif_variable() uses this routine as a helper for various
2701 * builtin values such as 'execname' and 'probefunc.'
2702 */
2703 uintptr_t
2704 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2705 dtrace_mstate_t *mstate)
2706 {
2707 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2708 uintptr_t ret;
2709 size_t strsz;
2710
2711 /*
2712 * The easy case: this probe is allowed to read all of memory, so
2713 * we can just return this as a vanilla pointer.
2714 */
2715 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2716 return (addr);
2717
2718 /*
2719 * This is the tougher case: we copy the string in question from
2720 * kernel memory into scratch memory and return it that way: this
2721 * ensures that we won't trip up when access checking tests the
2722 * BYREF return value.
2723 */
2724 strsz = dtrace_strlen((char *)addr, size) + 1;
2725
2726 if (mstate->dtms_scratch_ptr + strsz >
2727 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2728 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2729 return (0);
2730 }
2731
2732 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2733 strsz);
2734 ret = mstate->dtms_scratch_ptr;
2735 mstate->dtms_scratch_ptr += strsz;
2736 return (ret);
2737 }
2738
2739 #ifdef notyet
2740 /*
2741 * Return a string from a memory address which is known to hold one or
2742 * more concatenated, individually zero-terminated, sub-strings.
2743 * In the event that the user lacks the privilege to access
2744 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2745 * don't fail access checking.
2746 *
2747 * dtrace_dif_variable() uses this routine as a helper for various
2748 * builtin values such as 'execargs'.
2749 */
2750 static uintptr_t
2751 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
2752 dtrace_mstate_t *mstate)
2753 {
2754 char *p;
2755 size_t i;
2756 uintptr_t ret;
2757
2758 if (mstate->dtms_scratch_ptr + strsz >
2759 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2760 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2761 return (0);
2762 }
2763
2764 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2765 strsz);
2766
2767 /* Replace sub-string termination characters with a space. */
2768 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
2769 p++, i++)
2770 if (*p == '\0')
2771 *p = ' ';
2772
2773 ret = mstate->dtms_scratch_ptr;
2774 mstate->dtms_scratch_ptr += strsz;
2775 return (ret);
2776 }
2777 #endif
2778
2779 /*
2780 * This function implements the DIF emulator's variable lookups. The emulator
2781 * passes a reserved variable identifier and optional built-in array index.
2782 */
2783 static uint64_t
2784 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2785 uint64_t ndx)
2786 {
2787 /*
2788 * If we're accessing one of the uncached arguments, we'll turn this
2789 * into a reference in the args array.
2790 */
2791 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2792 ndx = v - DIF_VAR_ARG0;
2793 v = DIF_VAR_ARGS;
2794 }
2795
2796 switch (v) {
2797 case DIF_VAR_ARGS:
2798 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2799 if (ndx >= sizeof (mstate->dtms_arg) /
2800 sizeof (mstate->dtms_arg[0])) {
2801 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2802 dtrace_provider_t *pv;
2803 uint64_t val;
2804
2805 pv = mstate->dtms_probe->dtpr_provider;
2806 if (pv->dtpv_pops.dtps_getargval != NULL)
2807 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2808 mstate->dtms_probe->dtpr_id,
2809 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2810 else
2811 val = dtrace_getarg(ndx, aframes);
2812
2813 /*
2814 * This is regrettably required to keep the compiler
2815 * from tail-optimizing the call to dtrace_getarg().
2816 * The condition always evaluates to true, but the
2817 * compiler has no way of figuring that out a priori.
2818 * (None of this would be necessary if the compiler
2819 * could be relied upon to _always_ tail-optimize
2820 * the call to dtrace_getarg() -- but it can't.)
2821 */
2822 if (mstate->dtms_probe != NULL)
2823 return (val);
2824
2825 ASSERT(0);
2826 }
2827
2828 return (mstate->dtms_arg[ndx]);
2829
2830 #if defined(sun)
2831 case DIF_VAR_UREGS: {
2832 klwp_t *lwp;
2833
2834 if (!dtrace_priv_proc(state))
2835 return (0);
2836
2837 if ((lwp = curthread->t_lwp) == NULL) {
2838 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2839 cpu_core[curcpu_id].cpuc_dtrace_illval = NULL;
2840 return (0);
2841 }
2842
2843 return (dtrace_getreg(lwp->lwp_regs, ndx));
2845 }
2846 #endif
2847
2848 case DIF_VAR_CURTHREAD:
2849 if (!dtrace_priv_kernel(state))
2850 return (0);
2851 return ((uint64_t)(uintptr_t)curthread);
2852
2853 case DIF_VAR_TIMESTAMP:
2854 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2855 mstate->dtms_timestamp = dtrace_gethrtime();
2856 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2857 }
2858 return (mstate->dtms_timestamp);
2859
2860 case DIF_VAR_VTIMESTAMP:
2861 ASSERT(dtrace_vtime_references != 0);
2862 return (curthread->t_dtrace_vtime);
2863
2864 case DIF_VAR_WALLTIMESTAMP:
2865 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2866 mstate->dtms_walltimestamp = dtrace_gethrestime();
2867 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2868 }
2869 return (mstate->dtms_walltimestamp);
2870
2871 #if defined(sun)
2872 case DIF_VAR_IPL:
2873 if (!dtrace_priv_kernel(state))
2874 return (0);
2875 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2876 mstate->dtms_ipl = dtrace_getipl();
2877 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2878 }
2879 return (mstate->dtms_ipl);
2880 #endif
2881
2882 case DIF_VAR_EPID:
2883 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2884 return (mstate->dtms_epid);
2885
2886 case DIF_VAR_ID:
2887 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2888 return (mstate->dtms_probe->dtpr_id);
2889
2890 case DIF_VAR_STACKDEPTH:
2891 if (!dtrace_priv_kernel(state))
2892 return (0);
2893 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2894 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2895
2896 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2897 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2898 }
2899 return (mstate->dtms_stackdepth);
2900
2901 case DIF_VAR_USTACKDEPTH:
2902 if (!dtrace_priv_proc(state))
2903 return (0);
2904 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2905 /*
2906 * See comment in DIF_VAR_PID.
2907 */
2908 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2909 CPU_ON_INTR(CPU)) {
2910 mstate->dtms_ustackdepth = 0;
2911 } else {
2912 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2913 mstate->dtms_ustackdepth =
2914 dtrace_getustackdepth();
2915 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2916 }
2917 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2918 }
2919 return (mstate->dtms_ustackdepth);
2920
2921 case DIF_VAR_CALLER:
2922 if (!dtrace_priv_kernel(state))
2923 return (0);
2924 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2925 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2926
2927 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2928 /*
2929 * If this is an unanchored probe, we are
2930 * required to go through the slow path:
2931 * dtrace_caller() only guarantees correct
2932 * results for anchored probes.
2933 */
2934 pc_t caller[2] = {0, 0};
2935
2936 dtrace_getpcstack(caller, 2, aframes,
2937 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2938 mstate->dtms_caller = caller[1];
2939 } else if ((mstate->dtms_caller =
2940 dtrace_caller(aframes)) == -1) {
2941 /*
2942 * We have failed to do this the quick way;
2943 * we must resort to the slower approach of
2944 * calling dtrace_getpcstack().
2945 */
2946 pc_t caller = 0;
2947
2948 dtrace_getpcstack(&caller, 1, aframes, NULL);
2949 mstate->dtms_caller = caller;
2950 }
2951
2952 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2953 }
2954 return (mstate->dtms_caller);
2955
2956 case DIF_VAR_UCALLER:
2957 if (!dtrace_priv_proc(state))
2958 return (0);
2959
2960 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2961 uint64_t ustack[3];
2962
2963 /*
2964 * dtrace_getupcstack() fills in the first uint64_t
2965 * with the current PID. The second uint64_t will
2966 * be the program counter at user-level. The third
2967 * uint64_t will contain the caller, which is what
2968 * we're after.
2969 */
2970 ustack[2] = 0;
2971 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2972 dtrace_getupcstack(ustack, 3);
2973 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2974 mstate->dtms_ucaller = ustack[2];
2975 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
2976 }
2977
2978 return (mstate->dtms_ucaller);
2979
2980 case DIF_VAR_PROBEPROV:
2981 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2982 return (dtrace_dif_varstr(
2983 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
2984 state, mstate));
2985
2986 case DIF_VAR_PROBEMOD:
2987 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2988 return (dtrace_dif_varstr(
2989 (uintptr_t)mstate->dtms_probe->dtpr_mod,
2990 state, mstate));
2991
2992 case DIF_VAR_PROBEFUNC:
2993 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2994 return (dtrace_dif_varstr(
2995 (uintptr_t)mstate->dtms_probe->dtpr_func,
2996 state, mstate));
2997
2998 case DIF_VAR_PROBENAME:
2999 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3000 return (dtrace_dif_varstr(
3001 (uintptr_t)mstate->dtms_probe->dtpr_name,
3002 state, mstate));
3003
3004 case DIF_VAR_PID:
3005 if (!dtrace_priv_proc(state))
3006 return (0);
3007
3008 #if defined(sun)
3009 /*
3010 * Note that we are assuming that an unanchored probe is
3011 * always due to a high-level interrupt. (And we're assuming
3012 * that there is only a single high level interrupt.)
3013 */
3014 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3015 return (pid0.pid_id);
3016
3017 /*
3018 * It is always safe to dereference one's own t_procp pointer:
3019 * it always points to a valid, allocated proc structure.
3020 * Further, it is always safe to dereference the p_pidp member
3021 * of one's own proc structure. (These are truisms because
3022 * threads and processes don't clean up their own state --
3023 * they leave that task to whomever reaps them.)
3024 */
3025 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3026 #else
3027 return ((uint64_t)curproc->p_pid);
3028 #endif
3029
3030 case DIF_VAR_PPID:
3031 if (!dtrace_priv_proc(state))
3032 return (0);
3033
3034 #if defined(sun)
3035 /*
3036 * See comment in DIF_VAR_PID.
3037 */
3038 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3039 return (pid0.pid_id);
3040
3041 /*
3042 * It is always safe to dereference one's own t_procp pointer:
3043 * it always points to a valid, allocated proc structure.
3044 * (This is true because threads don't clean up their own
3045 * state -- they leave that task to whomever reaps them.)
3046 */
3047 return ((uint64_t)curthread->t_procp->p_ppid);
3048 #else
3049 return ((uint64_t)curproc->p_pptr->p_pid);
3050 #endif
3051
3052 case DIF_VAR_TID:
3053 #if defined(sun)
3054 /*
3055 * See comment in DIF_VAR_PID.
3056 */
3057 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3058 return (0);
3059 #endif
3060
3061 return ((uint64_t)curthread->t_tid);
3062
3063 case DIF_VAR_EXECARGS: {
3064 #if 0
3065 struct pargs *p_args = curthread->td_proc->p_args;
3066
3067 if (p_args == NULL)
3068 return(0);
3069
3070 return (dtrace_dif_varstrz(
3071 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3072 #endif
3073 /* XXX FreeBSD extension */
3074 return 0;
3075 }
3076
3077 case DIF_VAR_EXECNAME:
3078 #if defined(sun)
3079 if (!dtrace_priv_proc(state))
3080 return (0);
3081
3082 /*
3083 * See comment in DIF_VAR_PID.
3084 */
3085 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3086 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3087
3088 /*
3089 * It is always safe to dereference one's own t_procp pointer:
3090 * it always points to a valid, allocated proc structure.
3091 * (This is true because threads don't clean up their own
3092 * state -- they leave that task to whomever reaps them.)
3093 */
3094 return (dtrace_dif_varstr(
3095 (uintptr_t)curthread->t_procp->p_user.u_comm,
3096 state, mstate));
3097 #else
3098 return (dtrace_dif_varstr(
3099 (uintptr_t) curthread->l_proc->p_comm, state, mstate));
3100 #endif
3101
3102 case DIF_VAR_ZONENAME:
3103 #if defined(sun)
3104 if (!dtrace_priv_proc(state))
3105 return (0);
3106
3107 /*
3108 * See comment in DIF_VAR_PID.
3109 */
3110 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3111 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3112
3113 /*
3114 * It is always safe to dereference one's own t_procp pointer:
3115 * it always points to a valid, allocated proc structure.
3116 * (This is true because threads don't clean up their own
3117 * state -- they leave that task to whomever reaps them.)
3118 */
3119 return (dtrace_dif_varstr(
3120 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3121 state, mstate));
3122 #else
3123 return (0);
3124 #endif
3125
3126 case DIF_VAR_UID:
3127 if (!dtrace_priv_proc(state))
3128 return (0);
3129
3130 #if defined(sun)
3131 /*
3132 * See comment in DIF_VAR_PID.
3133 */
3134 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3135 return ((uint64_t)p0.p_cred->cr_uid);
3136
3137 /*
3138 * It is always safe to dereference one's own t_procp pointer:
3139 * it always points to a valid, allocated proc structure.
3140 * (This is true because threads don't clean up their own
3141 * state -- they leave that task to whomever reaps them.)
3142 *
3143 * Additionally, it is safe to dereference one's own process
3144 * credential, since this is never NULL after process birth.
3145 */
3146 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3147 #else
3148 return (uint64_t)kauth_cred_getuid(curthread->t_procp->p_cred);
3149 #endif
3150
3151 case DIF_VAR_GID:
3152 if (!dtrace_priv_proc(state))
3153 return (0);
3154
3155 #if defined(sun)
3156 /*
3157 * See comment in DIF_VAR_PID.
3158 */
3159 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3160 return ((uint64_t)p0.p_cred->cr_gid);
3161
3162 /*
3163 * It is always safe to dereference one's own t_procp pointer:
3164 * it always points to a valid, allocated proc structure.
3165 * (This is true because threads don't clean up their own
3166 * state -- they leave that task to whomever reaps them.)
3167 *
3168 * Additionally, it is safe to dereference one's own process
3169 * credential, since this is never NULL after process birth.
3170 */
3171 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3172 #else
3173 return (uint64_t)kauth_cred_getgid(curthread->t_procp->p_cred);
3174 #endif
3175
3176 case DIF_VAR_ERRNO: {
3177 #if defined(sun)
3178 klwp_t *lwp;
3179 if (!dtrace_priv_proc(state))
3180 return (0);
3181
3182 /*
3183 * See comment in DIF_VAR_PID.
3184 */
3185 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3186 return (0);
3187
3188 /*
3189 * It is always safe to dereference one's own t_lwp pointer in
3190 * the event that this pointer is non-NULL. (This is true
3191 * because threads and lwps don't clean up their own state --
3192 * they leave that task to whomever reaps them.)
3193 */
3194 if ((lwp = curthread->t_lwp) == NULL)
3195 return (0);
3196
3197 return ((uint64_t)lwp->lwp_errno);
3198 #else
3199 #if 0
3200 return (curthread->l_errno);
3201 #else
3202 return 0; /* XXX TBD errno support at lwp level? */
3203 #endif
3204 #endif
3205 }
3206 default:
3207 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3208 return (0);
3209 }
3210 }
3211
3212 /*
3213 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
3214 * Notice that we don't bother validating the proper number of arguments or
3215 * their types in the tuple stack. This isn't needed because all argument
3216 * interpretation is safe because of our load safety -- the worst that can
3217 * happen is that a bogus program can obtain bogus results.
3218 */
3219 static void
3220 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3221 dtrace_key_t *tupregs, int nargs,
3222 dtrace_mstate_t *mstate, dtrace_state_t *state)
3223 {
3224 volatile uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags;
3225 volatile uintptr_t *illval = &cpu_core[curcpu_id].cpuc_dtrace_illval;
3226 dtrace_vstate_t *vstate = &state->dts_vstate;
3227
3228 #if defined(sun)
3229 union {
3230 mutex_impl_t mi;
3231 uint64_t mx;
3232 } m;
3233
3234 union {
3235 krwlock_t ri;
3236 uintptr_t rw;
3237 } r;
3238 #else
3239 union {
3240 kmutex_t mi;
3241 uint64_t mx;
3242 } m;
3243
3244 union {
3245 krwlock_t ri;
3246 uintptr_t rw;
3247 } r;
3248 #endif
3249
3250 switch (subr) {
3251 case DIF_SUBR_RAND:
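		/*
		 * A simple linear congruential step seeded from the
		 * high-resolution timestamp: adequate for probabilistic
		 * tracing decisions, but by no means a strong PRNG.
		 */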
3252 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
3253 break;
3254
3255 #if defined(sun)
3256 case DIF_SUBR_MUTEX_OWNED:
3257 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3258 mstate, vstate)) {
3259 regs[rd] = 0;
3260 break;
3261 }
3262
3263 m.mx = dtrace_load64(tupregs[0].dttk_value);
3264 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3265 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3266 else
3267 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3268 break;
3269
3270 case DIF_SUBR_MUTEX_OWNER:
3271 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3272 mstate, vstate)) {
3273 regs[rd] = 0;
3274 break;
3275 }
3276
3277 m.mx = dtrace_load64(tupregs[0].dttk_value);
3278 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3279 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3280 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3281 else
3282 regs[rd] = 0;
3283 break;
3284
3285 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3286 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3287 mstate, vstate)) {
3288 regs[rd] = 0;
3289 break;
3290 }
3291
3292 m.mx = dtrace_load64(tupregs[0].dttk_value);
3293 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3294 break;
3295
3296 case DIF_SUBR_MUTEX_TYPE_SPIN:
3297 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3298 mstate, vstate)) {
3299 regs[rd] = 0;
3300 break;
3301 }
3302
3303 m.mx = dtrace_load64(tupregs[0].dttk_value);
3304 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3305 break;
3306
3307 case DIF_SUBR_RW_READ_HELD: {
3308 uintptr_t tmp;
3309
3310 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3311 mstate, vstate)) {
3312 regs[rd] = 0;
3313 break;
3314 }
3315
3316 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3317 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3318 break;
3319 }
3320
3321 case DIF_SUBR_RW_WRITE_HELD:
3322 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3323 mstate, vstate)) {
3324 regs[rd] = 0;
3325 break;
3326 }
3327
3328 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3329 regs[rd] = _RW_WRITE_HELD(&r.ri);
3330 break;
3331
3332 case DIF_SUBR_RW_ISWRITER:
3333 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3334 mstate, vstate)) {
3335 regs[rd] = 0;
3336 break;
3337 }
3338
3339 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3340 regs[rd] = _RW_ISWRITER(&r.ri);
3341 break;
3342
3343 #else
3344 case DIF_SUBR_MUTEX_OWNED:
3345 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3346 mstate, vstate)) {
3347 regs[rd] = 0;
3348 break;
3349 }
3350
3351 m.mx = dtrace_load64(tupregs[0].dttk_value);
3352 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3353 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3354 else
3355 regs[rd] = __SIMPLELOCK_LOCKED_P(&m.mi.mtx_lock);
3356 break;
3357
3358 case DIF_SUBR_MUTEX_OWNER:
3359 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3360 mstate, vstate)) {
3361 regs[rd] = 0;
3362 break;
3363 }
3364
3365 m.mx = dtrace_load64(tupregs[0].dttk_value);
3366 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3367 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3368 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3369 else
3370 regs[rd] = 0;
3371 break;
3372
3373 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3374 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3375 mstate, vstate)) {
3376 regs[rd] = 0;
3377 break;
3378 }
3379
3380 m.mx = dtrace_load64(tupregs[0].dttk_value);
3381 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3382 break;
3383
3384 case DIF_SUBR_MUTEX_TYPE_SPIN:
3385 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3386 mstate, vstate)) {
3387 regs[rd] = 0;
3388 break;
3389 }
3390
3391 m.mx = dtrace_load64(tupregs[0].dttk_value);
3392 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3393 break;
3394
3395 case DIF_SUBR_RW_READ_HELD: {
3396 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3397 mstate, vstate)) {
3398 regs[rd] = 0;
3399 break;
3400 }
3401
3402 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3403 regs[rd] = _RW_READ_HELD(&r.ri);
3404 break;
3405 }
3406
3407 case DIF_SUBR_RW_WRITE_HELD:
3408 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3409 mstate, vstate)) {
3410 regs[rd] = 0;
3411 break;
3412 }
3413
3414 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3415 regs[rd] = _RW_WRITE_HELD(&r.ri);
3416 break;
3417
3418 case DIF_SUBR_RW_ISWRITER:
3419 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3420 mstate, vstate)) {
3421 regs[rd] = 0;
3422 break;
3423 }
3424
3425 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3426 regs[rd] = _RW_ISWRITER(&r.ri);
3427 break;
3428
3429 #endif /* ! defined(sun) */
3430
3431 case DIF_SUBR_BCOPY: {
3432 /*
3433 * We need to be sure that the destination is in the scratch
3434 * region -- no other region is allowed.
3435 */
3436 uintptr_t src = tupregs[0].dttk_value;
3437 uintptr_t dest = tupregs[1].dttk_value;
3438 size_t size = tupregs[2].dttk_value;
3439
3440 if (!dtrace_inscratch(dest, size, mstate)) {
3441 *flags |= CPU_DTRACE_BADADDR;
3442 *illval = regs[rd];
3443 break;
3444 }
3445
3446 if (!dtrace_canload(src, size, mstate, vstate)) {
3447 regs[rd] = 0;
3448 break;
3449 }
3450
3451 dtrace_bcopy((void *)src, (void *)dest, size);
3452 break;
3453 }
3454
3455 case DIF_SUBR_ALLOCA:
3456 case DIF_SUBR_COPYIN: {
3457 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3458 uint64_t size =
3459 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3460 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3461
3462 /*
3463 * This action doesn't require any credential checks since
3464 * probes will not activate in user contexts to which the
3465 * enabling user does not have permissions.
3466 */
3467
3468 /*
3469 * Rounding up the user allocation size could have overflowed a large,
3470 * bogus allocation (like -1ULL) to 0; scratch_size < size catches that wrap.
3471 */
3472 if (scratch_size < size ||
3473 !DTRACE_INSCRATCH(mstate, scratch_size)) {
3474 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3475 regs[rd] = 0;
3476 break;
3477 }
3478
3479 if (subr == DIF_SUBR_COPYIN) {
3480 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3481 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3482 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3483 }
3484
3485 mstate->dtms_scratch_ptr += scratch_size;
3486 regs[rd] = dest;
3487 break;
3488 }
3489
3490 case DIF_SUBR_COPYINTO: {
3491 uint64_t size = tupregs[1].dttk_value;
3492 uintptr_t dest = tupregs[2].dttk_value;
3493
3494 /*
3495 * This action doesn't require any credential checks since
3496 * probes will not activate in user contexts to which the
3497 * enabling user does not have permissions.
3498 */
3499 if (!dtrace_inscratch(dest, size, mstate)) {
3500 *flags |= CPU_DTRACE_BADADDR;
3501 *illval = regs[rd];
3502 break;
3503 }
3504
3505 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3506 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3507 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3508 break;
3509 }
3510
3511 case DIF_SUBR_COPYINSTR: {
3512 uintptr_t dest = mstate->dtms_scratch_ptr;
3513 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3514
3515 if (nargs > 1 && tupregs[1].dttk_value < size)
3516 size = tupregs[1].dttk_value + 1;
3517
3518 /*
3519 * This action doesn't require any credential checks since
3520 * probes will not activate in user contexts to which the
3521 * enabling user does not have permissions.
3522 */
3523 if (!DTRACE_INSCRATCH(mstate, size)) {
3524 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3525 regs[rd] = 0;
3526 break;
3527 }
3528
3529 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3530 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3531 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3532
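		/*
		 * Guarantee NUL termination even if the copy consumed the
		 * entire buffer without encountering one.
		 */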
3533 ((char *)dest)[size - 1] = '\0';
3534 mstate->dtms_scratch_ptr += size;
3535 regs[rd] = dest;
3536 break;
3537 }
3538
3539 #if defined(sun)
3540 case DIF_SUBR_MSGSIZE:
3541 case DIF_SUBR_MSGDSIZE: {
3542 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3543 uintptr_t wptr, rptr;
3544 size_t count = 0;
3545 int cont = 0;
3546
3547 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
3548
3549 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3550 vstate)) {
3551 regs[rd] = 0;
3552 break;
3553 }
3554
3555 wptr = dtrace_loadptr(baddr +
3556 offsetof(mblk_t, b_wptr));
3557
3558 rptr = dtrace_loadptr(baddr +
3559 offsetof(mblk_t, b_rptr));
3560
3561 if (wptr < rptr) {
3562 *flags |= CPU_DTRACE_BADADDR;
3563 *illval = tupregs[0].dttk_value;
3564 break;
3565 }
3566
3567 daddr = dtrace_loadptr(baddr +
3568 offsetof(mblk_t, b_datap));
3569
3570 baddr = dtrace_loadptr(baddr +
3571 offsetof(mblk_t, b_cont));
3572
3573 /*
3574 * We want to guard against denial-of-service here,
3575 * so we're only going to search the list for
3576 * dtrace_msgdsize_max mblks.
3577 */
3578 if (cont++ > dtrace_msgdsize_max) {
3579 *flags |= CPU_DTRACE_ILLOP;
3580 break;
3581 }
3582
3583 if (subr == DIF_SUBR_MSGDSIZE) {
3584 if (dtrace_load8(daddr +
3585 offsetof(dblk_t, db_type)) != M_DATA)
3586 continue;
3587 }
3588
3589 count += wptr - rptr;
3590 }
3591
3592 if (!(*flags & CPU_DTRACE_FAULT))
3593 regs[rd] = count;
3594
3595 break;
3596 }
3597 #endif
3598
3599 case DIF_SUBR_PROGENYOF: {
3600 pid_t pid = tupregs[0].dttk_value;
3601 proc_t *p;
3602 int rval = 0;
3603
3604 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3605
3606 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3607 #if defined(sun)
3608 if (p->p_pidp->pid_id == pid) {
3609 #else
3610 if (p->p_pid == pid) {
3611 #endif
3612 rval = 1;
3613 break;
3614 }
3615 }
3616
3617 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3618
3619 regs[rd] = rval;
3620 break;
3621 }
3622
3623 case DIF_SUBR_SPECULATION:
3624 regs[rd] = dtrace_speculation(state);
3625 break;
3626
3627 case DIF_SUBR_COPYOUT: {
3628 uintptr_t kaddr = tupregs[0].dttk_value;
3629 uintptr_t uaddr = tupregs[1].dttk_value;
3630 uint64_t size = tupregs[2].dttk_value;
3631
3632 if (!dtrace_destructive_disallow &&
3633 dtrace_priv_proc_control(state) &&
3634 !dtrace_istoxic(kaddr, size)) {
3635 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3636 dtrace_copyout(kaddr, uaddr, size, flags);
3637 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3638 }
3639 break;
3640 }
3641
3642 case DIF_SUBR_COPYOUTSTR: {
3643 uintptr_t kaddr = tupregs[0].dttk_value;
3644 uintptr_t uaddr = tupregs[1].dttk_value;
3645 uint64_t size = tupregs[2].dttk_value;
3646
3647 if (!dtrace_destructive_disallow &&
3648 dtrace_priv_proc_control(state) &&
3649 !dtrace_istoxic(kaddr, size)) {
3650 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3651 dtrace_copyoutstr(kaddr, uaddr, size, flags);
3652 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3653 }
3654 break;
3655 }
3656
3657 case DIF_SUBR_STRLEN: {
3658 size_t sz;
3659 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
3660 sz = dtrace_strlen((char *)addr,
3661 state->dts_options[DTRACEOPT_STRSIZE]);
3662
3663 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
3664 regs[rd] = 0;
3665 break;
3666 }
3667
3668 regs[rd] = sz;
3669
3670 break;
3671 }
3672
3673 case DIF_SUBR_STRCHR:
3674 case DIF_SUBR_STRRCHR: {
3675 /*
3676 * We're going to iterate over the string looking for the
3677 * specified character. We will iterate until we have reached
3678 * the string length or we have found the character. If this
3679 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3680 * of the specified character instead of the first.
3681 */
3682 uintptr_t saddr = tupregs[0].dttk_value;
3683 uintptr_t addr = tupregs[0].dttk_value;
3684 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3685 char c, target = (char)tupregs[1].dttk_value;
3686
3687 for (regs[rd] = 0; addr < limit; addr++) {
3688 if ((c = dtrace_load8(addr)) == target) {
3689 regs[rd] = addr;
3690
3691 if (subr == DIF_SUBR_STRCHR)
3692 break;
3693 }
3694
3695 if (c == '\0')
3696 break;
3697 }
3698
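		/*
		 * The scan above used safe loads; before returning the
		 * result, verify that the caller was actually permitted to
		 * read the range that was traversed ([saddr, addr)).
		 */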
3699 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
3700 regs[rd] = 0;
3701 break;
3702 }
3703
3704 break;
3705 }
3706
3707 case DIF_SUBR_STRSTR:
3708 case DIF_SUBR_INDEX:
3709 case DIF_SUBR_RINDEX: {
3710 /*
3711 * We're going to iterate over the string looking for the
3712 * specified string. We will iterate until we have reached
3713 * the string length or we have found the string. (Yes, this
3714 * is done in the most naive way possible -- but considering
3715 * that the string we're searching for is likely to be
3716 * relatively short, the complexity of Rabin-Karp or similar
3717 * hardly seems merited.)
3718 */
3719 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3720 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3721 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3722 size_t len = dtrace_strlen(addr, size);
3723 size_t sublen = dtrace_strlen(substr, size);
3724 char *limit = addr + len, *orig = addr;
3725 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3726 int inc = 1;
3727
3728 regs[rd] = notfound;
3729
3730 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
3731 regs[rd] = 0;
3732 break;
3733 }
3734
3735 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
3736 vstate)) {
3737 regs[rd] = 0;
3738 break;
3739 }
3740
3741 /*
3742 * strstr() and index()/rindex() have similar semantics if
3743 * both strings are the empty string: strstr() returns a
3744 * pointer to the (empty) string, and index() and rindex()
3745 * both return index 0 (regardless of any position argument).
3746 */
3747 if (sublen == 0 && len == 0) {
3748 if (subr == DIF_SUBR_STRSTR)
3749 regs[rd] = (uintptr_t)addr;
3750 else
3751 regs[rd] = 0;
3752 break;
3753 }
3754
3755 if (subr != DIF_SUBR_STRSTR) {
3756 if (subr == DIF_SUBR_RINDEX) {
3757 limit = orig - 1;
3758 addr += len;
3759 inc = -1;
3760 }
3761
3762 /*
3763 * Both index() and rindex() take an optional position
3764 * argument that denotes the starting position.
3765 */
3766 if (nargs == 3) {
3767 int64_t pos = (int64_t)tupregs[2].dttk_value;
3768
3769 /*
3770 * If the position argument to index() is
3771 * negative, Perl implicitly clamps it at
3772 * zero. This semantic is a little surprising
3773 * given the special meaning of negative
3774 * positions to similar Perl functions like
3775 * substr(), but it appears to reflect a
3776 * notion that index() can start from a
3777 * negative index and increment its way up to
3778 * the string. Given this notion, Perl's
3779 * rindex() is at least self-consistent in
3780 * that it implicitly clamps positions greater
3781 * than the string length to be the string
3782 * length. Where Perl completely loses
3783 * coherence, however, is when the specified
3784 * substring is the empty string (""). In
3785 * this case, even if the position is
3786 * negative, rindex() returns 0 -- and even if
3787 * the position is greater than the length,
3788 * index() returns the string length. These
3789 * semantics violate the notion that index()
3790 * should never return a value less than the
3791 * specified position and that rindex() should
3792 * never return a value greater than the
3793 * specified position. (One assumes that
3794 * these semantics are artifacts of Perl's
3795 * implementation and not the results of
3796 * deliberate design -- it beggars belief that
3797 * even Larry Wall could desire such oddness.)
3798 * While in the abstract one would wish for
3799 * consistent position semantics across
3800 * substr(), index() and rindex() -- or at the
3801 * very least self-consistent position
3802 * semantics for index() and rindex() -- we
3803 * instead opt to keep with the extant Perl
3804 * semantics, in all their broken glory. (Do
3805 * we have more desire to maintain Perl's
3806 * semantics than Perl does? Probably.)
3807 */
3808 if (subr == DIF_SUBR_RINDEX) {
3809 if (pos < 0) {
3810 if (sublen == 0)
3811 regs[rd] = 0;
3812 break;
3813 }
3814
3815 if (pos > len)
3816 pos = len;
3817 } else {
3818 if (pos < 0)
3819 pos = 0;
3820
3821 if (pos >= len) {
3822 if (sublen == 0)
3823 regs[rd] = len;
3824 break;
3825 }
3826 }
3827
3828 addr = orig + pos;
3829 }
3830 }
3831
3832 for (regs[rd] = notfound; addr != limit; addr += inc) {
3833 if (dtrace_strncmp(addr, substr, sublen) == 0) {
3834 if (subr != DIF_SUBR_STRSTR) {
3835 /*
3836 * As D index() and rindex() are
3837 * modeled on Perl (and not on awk),
3838 * we return a zero-based (and not a
3839 * one-based) index. (For you Perl
3840 * weenies: no, we're not going to add
3841 * $[ -- and shouldn't you be at a con
3842 * or something?)
3843 */
3844 regs[rd] = (uintptr_t)(addr - orig);
3845 break;
3846 }
3847
3848 ASSERT(subr == DIF_SUBR_STRSTR);
3849 regs[rd] = (uintptr_t)addr;
3850 break;
3851 }
3852 }
3853
3854 break;
3855 }
3856
3857 case DIF_SUBR_STRTOK: {
3858 uintptr_t addr = tupregs[0].dttk_value;
3859 uintptr_t tokaddr = tupregs[1].dttk_value;
3860 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3861 uintptr_t limit, toklimit = tokaddr + size;
3862 uint8_t c = 0, tokmap[32]; /* 256 / 8 */
3863 char *dest = (char *)mstate->dtms_scratch_ptr;
3864 int i;
3865
3866 /*
3867 * Check both the token buffer and (later) the input buffer,
3868 * since both could be non-scratch addresses.
3869 */
3870 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
3871 regs[rd] = 0;
3872 break;
3873 }
3874
3875 if (!DTRACE_INSCRATCH(mstate, size)) {
3876 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3877 regs[rd] = 0;
3878 break;
3879 }
3880
3881 if (addr == 0) {
3882 /*
3883 * If the address specified is NULL, we use our saved
3884 * strtok pointer from the mstate. Note that this
3885 * means that the saved strtok pointer is _only_
3886 * valid within multiple enablings of the same probe --
3887 * it behaves like an implicit clause-local variable.
3888 */
3889 addr = mstate->dtms_strtok;
3890 } else {
3891 /*
3892 * If the user-specified address is non-NULL we must
3893 * access check it. This is the only time we have
3894 * a chance to do so, since this address may reside
			 * in the string table of this clause -- future calls
3896 * (when we fetch addr from mstate->dtms_strtok)
3897 * would fail this access check.
3898 */
3899 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
3900 regs[rd] = 0;
3901 break;
3902 }
3903 }
3904
3905 /*
3906 * First, zero the token map, and then process the token
3907 * string -- setting a bit in the map for every character
3908 * found in the token string.
3909 */
3910 for (i = 0; i < sizeof (tokmap); i++)
3911 tokmap[i] = 0;
3912
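		/*
		 * tokmap is a 256-bit set: character c is represented by
		 * bit (c & 0x7) of byte (c >> 3). A token string of "/:",
		 * for example, sets bit 7 of tokmap[5] ('/' is 0x2f) and
		 * bit 2 of tokmap[7] (':' is 0x3a).
		 */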
3913 for (; tokaddr < toklimit; tokaddr++) {
3914 if ((c = dtrace_load8(tokaddr)) == '\0')
3915 break;
3916
3917 ASSERT((c >> 3) < sizeof (tokmap));
3918 tokmap[c >> 3] |= (1 << (c & 0x7));
3919 }
3920
3921 for (limit = addr + size; addr < limit; addr++) {
3922 /*
3923 * We're looking for a character that is _not_ contained
3924 * in the token string.
3925 */
3926 if ((c = dtrace_load8(addr)) == '\0')
3927 break;
3928
3929 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3930 break;
3931 }
3932
3933 if (c == '\0') {
3934 /*
3935 * We reached the end of the string without finding
3936 * any character that was not in the token string.
3937 * We return NULL in this case, and we set the saved
3938 * address to NULL as well.
3939 */
3940 regs[rd] = 0;
3941 mstate->dtms_strtok = 0;
3942 break;
3943 }
3944
3945 /*
3946 * From here on, we're copying into the destination string.
3947 */
3948 for (i = 0; addr < limit && i < size - 1; addr++) {
3949 if ((c = dtrace_load8(addr)) == '\0')
3950 break;
3951
3952 if (tokmap[c >> 3] & (1 << (c & 0x7)))
3953 break;
3954
3955 ASSERT(i < size);
3956 dest[i++] = c;
3957 }
3958
3959 ASSERT(i < size);
3960 dest[i] = '\0';
3961 regs[rd] = (uintptr_t)dest;
3962 mstate->dtms_scratch_ptr += size;
3963 mstate->dtms_strtok = addr;
3964 break;
3965 }
3966
3967 case DIF_SUBR_SUBSTR: {
3968 uintptr_t s = tupregs[0].dttk_value;
3969 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3970 char *d = (char *)mstate->dtms_scratch_ptr;
3971 int64_t index = (int64_t)tupregs[1].dttk_value;
3972 int64_t remaining = (int64_t)tupregs[2].dttk_value;
3973 size_t len = dtrace_strlen((char *)s, size);
3974 int64_t i = 0;
3975
3976 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
3977 regs[rd] = 0;
3978 break;
3979 }
3980
3981 if (!DTRACE_INSCRATCH(mstate, size)) {
3982 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3983 regs[rd] = 0;
3984 break;
3985 }
3986
3987 if (nargs <= 2)
3988 remaining = (int64_t)size;
3989
3990 if (index < 0) {
3991 index += len;
3992
3993 if (index < 0 && index + remaining > 0) {
3994 remaining += index;
3995 index = 0;
3996 }
3997 }
3998
3999 if (index >= len || index < 0) {
4000 remaining = 0;
4001 } else if (remaining < 0) {
4002 remaining += len - index;
4003 } else if (index + remaining > size) {
4004 remaining = size - index;
4005 }
4006
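		/*
		 * For example, substr("hello", -3, 2) is "ll": index becomes
		 * 5 - 3 = 2 and two bytes are copied. substr("hello", 1) is
		 * "ello": remaining defaults to the scratch size and is
		 * clamped above before the copy.
		 */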
4007 for (i = 0; i < remaining; i++) {
4008 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4009 break;
4010 }
4011
4012 d[i] = '\0';
4013
4014 mstate->dtms_scratch_ptr += size;
4015 regs[rd] = (uintptr_t)d;
4016 break;
4017 }
4018
4019 #if defined(sun)
4020 case DIF_SUBR_GETMAJOR:
4021 #ifdef _LP64
4022 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
4023 #else
4024 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
4025 #endif
4026 break;
4027
4028 case DIF_SUBR_GETMINOR:
4029 #ifdef _LP64
4030 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
4031 #else
4032 regs[rd] = tupregs[0].dttk_value & MAXMIN;
4033 #endif
4034 break;
4035
4036 case DIF_SUBR_DDI_PATHNAME: {
4037 /*
4038 * This one is a galactic mess. We are going to roughly
4039 * emulate ddi_pathname(), but it's made more complicated
4040 * by the fact that we (a) want to include the minor name and
4041 * (b) must proceed iteratively instead of recursively.
4042 */
4043 uintptr_t dest = mstate->dtms_scratch_ptr;
4044 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4045 char *start = (char *)dest, *end = start + size - 1;
4046 uintptr_t daddr = tupregs[0].dttk_value;
4047 int64_t minor = (int64_t)tupregs[1].dttk_value;
4048 char *s;
4049 int i, len, depth = 0;
4050
4051 /*
4052 * Due to all the pointer jumping we do and context we must
4053 * rely upon, we just mandate that the user must have kernel
4054 * read privileges to use this routine.
4055 */
4056 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
4057 *flags |= CPU_DTRACE_KPRIV;
4058 *illval = daddr;
4059 regs[rd] = 0;
4060 }
4061
4062 if (!DTRACE_INSCRATCH(mstate, size)) {
4063 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4064 regs[rd] = 0;
4065 break;
4066 }
4067
4068 *end = '\0';
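
		/*
		 * The pathname is assembled backwards: starting at the end
		 * of the scratch buffer, each component is prepended as we
		 * walk from the given devinfo node up toward the root, so
		 * "end" marches toward "start".
		 */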
4069
4070 /*
4071 * We want to have a name for the minor. In order to do this,
4072 * we need to walk the minor list from the devinfo. We want
4073 * to be sure that we don't infinitely walk a circular list,
4074 * so we check for circularity by sending a scout pointer
4075 * ahead two elements for every element that we iterate over;
4076 * if the list is circular, these will ultimately point to the
4077 * same element. You may recognize this little trick as the
4078 * answer to a stupid interview question -- one that always
4079 * seems to be asked by those who had to have it laboriously
4080 * explained to them, and who can't even concisely describe
4081 * the conditions under which one would be forced to resort to
4082 * this technique. Needless to say, those conditions are
4083 * found here -- and probably only here. Is this the only use
4084 * of this infamous trick in shipping, production code? If it
4085 * isn't, it probably should be...
4086 */
4087 if (minor != -1) {
4088 uintptr_t maddr = dtrace_loadptr(daddr +
4089 offsetof(struct dev_info, devi_minor));
4090
4091 uintptr_t next = offsetof(struct ddi_minor_data, next);
4092 uintptr_t name = offsetof(struct ddi_minor_data,
4093 d_minor) + offsetof(struct ddi_minor, name);
4094 uintptr_t dev = offsetof(struct ddi_minor_data,
4095 d_minor) + offsetof(struct ddi_minor, dev);
4096 uintptr_t scout;
4097
4098 if (maddr != NULL)
4099 scout = dtrace_loadptr(maddr + next);
4100
4101 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4102 uint64_t m;
4103 #ifdef _LP64
4104 m = dtrace_load64(maddr + dev) & MAXMIN64;
4105 #else
4106 m = dtrace_load32(maddr + dev) & MAXMIN;
4107 #endif
4108 if (m != minor) {
4109 maddr = dtrace_loadptr(maddr + next);
4110
4111 if (scout == NULL)
4112 continue;
4113
4114 scout = dtrace_loadptr(scout + next);
4115
4116 if (scout == NULL)
4117 continue;
4118
4119 scout = dtrace_loadptr(scout + next);
4120
4121 if (scout == NULL)
4122 continue;
4123
4124 if (scout == maddr) {
4125 *flags |= CPU_DTRACE_ILLOP;
4126 break;
4127 }
4128
4129 continue;
4130 }
4131
4132 /*
4133 * We have the minor data. Now we need to
4134 * copy the minor's name into the end of the
4135 * pathname.
4136 */
4137 s = (char *)dtrace_loadptr(maddr + name);
4138 len = dtrace_strlen(s, size);
4139
4140 if (*flags & CPU_DTRACE_FAULT)
4141 break;
4142
4143 if (len != 0) {
4144 if ((end -= (len + 1)) < start)
4145 break;
4146
4147 *end = ':';
4148 }
4149
4150 for (i = 1; i <= len; i++)
4151 end[i] = dtrace_load8((uintptr_t)s++);
4152 break;
4153 }
4154 }
4155
4156 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4157 ddi_node_state_t devi_state;
4158
4159 devi_state = dtrace_load32(daddr +
4160 offsetof(struct dev_info, devi_node_state));
4161
4162 if (*flags & CPU_DTRACE_FAULT)
4163 break;
4164
4165 if (devi_state >= DS_INITIALIZED) {
4166 s = (char *)dtrace_loadptr(daddr +
4167 offsetof(struct dev_info, devi_addr));
4168 len = dtrace_strlen(s, size);
4169
4170 if (*flags & CPU_DTRACE_FAULT)
4171 break;
4172
4173 if (len != 0) {
4174 if ((end -= (len + 1)) < start)
4175 break;
4176
4177 *end = '@';
4178 }
4179
4180 for (i = 1; i <= len; i++)
4181 end[i] = dtrace_load8((uintptr_t)s++);
4182 }
4183
4184 /*
4185 * Now for the node name...
4186 */
4187 s = (char *)dtrace_loadptr(daddr +
4188 offsetof(struct dev_info, devi_node_name));
4189
4190 daddr = dtrace_loadptr(daddr +
4191 offsetof(struct dev_info, devi_parent));
4192
4193 /*
4194 * If our parent is NULL (that is, if we're the root
4195 * node), we're going to use the special path
4196 * "devices".
4197 */
4198 if (daddr == 0)
4199 s = "devices";
4200
4201 len = dtrace_strlen(s, size);
4202 if (*flags & CPU_DTRACE_FAULT)
4203 break;
4204
4205 if ((end -= (len + 1)) < start)
4206 break;
4207
4208 for (i = 1; i <= len; i++)
4209 end[i] = dtrace_load8((uintptr_t)s++);
4210 *end = '/';
4211
4212 if (depth++ > dtrace_devdepth_max) {
4213 *flags |= CPU_DTRACE_ILLOP;
4214 break;
4215 }
4216 }
4217
4218 if (end < start)
4219 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4220
4221 if (daddr == 0) {
4222 regs[rd] = (uintptr_t)end;
4223 mstate->dtms_scratch_ptr += size;
4224 }
4225
4226 break;
4227 }
4228 #endif
4229
4230 case DIF_SUBR_STRJOIN: {
4231 char *d = (char *)mstate->dtms_scratch_ptr;
4232 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4233 uintptr_t s1 = tupregs[0].dttk_value;
4234 uintptr_t s2 = tupregs[1].dttk_value;
4235 int i = 0;
4236
4237 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
4238 !dtrace_strcanload(s2, size, mstate, vstate)) {
4239 regs[rd] = 0;
4240 break;
4241 }
4242
4243 if (!DTRACE_INSCRATCH(mstate, size)) {
4244 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4245 regs[rd] = 0;
4246 break;
4247 }
4248
4249 for (;;) {
4250 if (i >= size) {
4251 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4252 regs[rd] = 0;
4253 break;
4254 }
4255
4256 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
4257 i--;
4258 break;
4259 }
4260 }
4261
4262 for (;;) {
4263 if (i >= size) {
4264 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4265 regs[rd] = 0;
4266 break;
4267 }
4268
4269 if ((d[i++] = dtrace_load8(s2++)) == '\0')
4270 break;
4271 }
4272
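		/*
		 * Unlike most string subroutines, strjoin() consumes only
		 * the bytes actually written (including the terminating
		 * NUL) rather than a full DTRACEOPT_STRSIZE worth of
		 * scratch.
		 */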
4273 if (i < size) {
4274 mstate->dtms_scratch_ptr += i;
4275 regs[rd] = (uintptr_t)d;
4276 }
4277
4278 break;
4279 }
4280
4281 case DIF_SUBR_LLTOSTR: {
4282 int64_t i = (int64_t)tupregs[0].dttk_value;
		/*
		 * Take the absolute value as unsigned so that INT64_MIN
		 * cannot overflow.
		 */
		uint64_t val = i < 0 ? -(uint64_t)i : (uint64_t)i;
4284 uint64_t size = 22; /* enough room for 2^64 in decimal */
4285 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4286
4287 if (!DTRACE_INSCRATCH(mstate, size)) {
4288 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4289 regs[rd] = 0;
4290 break;
4291 }
4292
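		/*
		 * Generate decimal digits least-significant first, working
		 * backwards from the end of the buffer; e.g. for -42 we
		 * store '\0', then '2', then '4', then '-', leaving regs[rd]
		 * pointing at "-42".
		 */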
4293 for (*end-- = '\0'; val; val /= 10)
4294 *end-- = '0' + (val % 10);
4295
4296 if (i == 0)
4297 *end-- = '0';
4298
4299 if (i < 0)
4300 *end-- = '-';
4301
4302 regs[rd] = (uintptr_t)end + 1;
4303 mstate->dtms_scratch_ptr += size;
4304 break;
4305 }
4306
4307 case DIF_SUBR_HTONS:
4308 case DIF_SUBR_NTOHS:
4309 #if BYTE_ORDER == BIG_ENDIAN
4310 regs[rd] = (uint16_t)tupregs[0].dttk_value;
4311 #else
4312 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
4313 #endif
		break;

4317 case DIF_SUBR_HTONL:
4318 case DIF_SUBR_NTOHL:
4319 #if BYTE_ORDER == BIG_ENDIAN
4320 regs[rd] = (uint32_t)tupregs[0].dttk_value;
4321 #else
4322 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
4323 #endif
		break;

4327 case DIF_SUBR_HTONLL:
4328 case DIF_SUBR_NTOHLL:
4329 #if BYTE_ORDER == BIG_ENDIAN
4330 regs[rd] = (uint64_t)tupregs[0].dttk_value;
4331 #else
4332 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
4333 #endif
		break;

4337 case DIF_SUBR_DIRNAME:
4338 case DIF_SUBR_BASENAME: {
4339 char *dest = (char *)mstate->dtms_scratch_ptr;
4340 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4341 uintptr_t src = tupregs[0].dttk_value;
4342 int i, j, len = dtrace_strlen((char *)src, size);
4343 int lastbase = -1, firstbase = -1, lastdir = -1;
4344 int start, end;
4345
4346 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
4347 regs[rd] = 0;
4348 break;
4349 }
4350
4351 if (!DTRACE_INSCRATCH(mstate, size)) {
4352 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4353 regs[rd] = 0;
4354 break;
4355 }
4356
4357 /*
4358 * The basename and dirname for a zero-length string is
4359 * defined to be "."
4360 */
4361 if (len == 0) {
4362 len = 1;
4363 src = (uintptr_t)".";
4364 }
4365
4366 /*
4367 * Start from the back of the string, moving back toward the
4368 * front until we see a character that isn't a slash. That
4369 * character is the last character in the basename.
4370 */
4371 for (i = len - 1; i >= 0; i--) {
4372 if (dtrace_load8(src + i) != '/')
4373 break;
4374 }
4375
4376 if (i >= 0)
4377 lastbase = i;
4378
4379 /*
4380 * Starting from the last character in the basename, move
4381 * towards the front until we find a slash. The character
4382 * that we processed immediately before that is the first
4383 * character in the basename.
4384 */
4385 for (; i >= 0; i--) {
4386 if (dtrace_load8(src + i) == '/')
4387 break;
4388 }
4389
4390 if (i >= 0)
4391 firstbase = i + 1;
4392
4393 /*
4394 * Now keep going until we find a non-slash character. That
4395 * character is the last character in the dirname.
4396 */
4397 for (; i >= 0; i--) {
4398 if (dtrace_load8(src + i) != '/')
4399 break;
4400 }
4401
4402 if (i >= 0)
4403 lastdir = i;
4404
4405 ASSERT(!(lastbase == -1 && firstbase != -1));
4406 ASSERT(!(firstbase == -1 && lastdir != -1));
4407
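		/*
		 * For example, given "/usr/lib//": lastbase lands on the
		 * 'b' (index 7), firstbase on the 'l' (index 5) and lastdir
		 * on the 'r' (index 3), so basename() yields "lib" and
		 * dirname() yields "/usr".
		 */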
4408 if (lastbase == -1) {
4409 /*
4410 * We didn't find a non-slash character. We know that
4411 * the length is non-zero, so the whole string must be
4412 * slashes. In either the dirname or the basename
4413 * case, we return '/'.
4414 */
4415 ASSERT(firstbase == -1);
4416 firstbase = lastbase = lastdir = 0;
4417 }
4418
4419 if (firstbase == -1) {
4420 /*
4421 * The entire string consists only of a basename
4422 * component. If we're looking for dirname, we need
4423 * to change our string to be just "."; if we're
4424 * looking for a basename, we'll just set the first
4425 * character of the basename to be 0.
4426 */
4427 if (subr == DIF_SUBR_DIRNAME) {
4428 ASSERT(lastdir == -1);
4429 src = (uintptr_t)".";
4430 lastdir = 0;
4431 } else {
4432 firstbase = 0;
4433 }
4434 }
4435
4436 if (subr == DIF_SUBR_DIRNAME) {
4437 if (lastdir == -1) {
4438 /*
4439 * We know that we have a slash in the name --
4440 * or lastdir would be set to 0, above. And
4441 * because lastdir is -1, we know that this
4442 * slash must be the first character. (That
4443 * is, the full string must be of the form
4444 * "/basename".) In this case, the last
4445 * character of the directory name is 0.
4446 */
4447 lastdir = 0;
4448 }
4449
4450 start = 0;
4451 end = lastdir;
4452 } else {
4453 ASSERT(subr == DIF_SUBR_BASENAME);
4454 ASSERT(firstbase != -1 && lastbase != -1);
4455 start = firstbase;
4456 end = lastbase;
4457 }
4458
4459 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
4460 dest[j] = dtrace_load8(src + i);
4461
4462 dest[j] = '\0';
4463 regs[rd] = (uintptr_t)dest;
4464 mstate->dtms_scratch_ptr += size;
4465 break;
4466 }
4467
4468 case DIF_SUBR_CLEANPATH: {
4469 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4470 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4471 uintptr_t src = tupregs[0].dttk_value;
4472 int i = 0, j = 0;
4473
4474 if (!dtrace_strcanload(src, size, mstate, vstate)) {
4475 regs[rd] = 0;
4476 break;
4477 }
4478
4479 if (!DTRACE_INSCRATCH(mstate, size)) {
4480 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4481 regs[rd] = 0;
4482 break;
4483 }
4484
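		/*
		 * The loop below is a small state machine keyed on '/': it
		 * collapses runs of slashes, elides "." components, and
		 * resolves ".." components by backing the destination
		 * pointer up to the previous slash. For example,
		 * "/foo/./bar/../baz" cleans to "/foo/baz".
		 */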
4485 /*
4486 * Move forward, loading each character.
4487 */
4488 do {
4489 c = dtrace_load8(src + i++);
4490 next:
			if (j + 5 >= size)	/* 5 = sizeof ("/..c") */
4492 break;
4493
4494 if (c != '/') {
4495 dest[j++] = c;
4496 continue;
4497 }
4498
4499 c = dtrace_load8(src + i++);
4500
4501 if (c == '/') {
4502 /*
4503 * We have two slashes -- we can just advance
4504 * to the next character.
4505 */
4506 goto next;
4507 }
4508
4509 if (c != '.') {
4510 /*
4511 * This is not "." and it's not ".." -- we can
4512 * just store the "/" and this character and
4513 * drive on.
4514 */
4515 dest[j++] = '/';
4516 dest[j++] = c;
4517 continue;
4518 }
4519
4520 c = dtrace_load8(src + i++);
4521
4522 if (c == '/') {
4523 /*
4524 * This is a "/./" component. We're not going
4525 * to store anything in the destination buffer;
4526 * we're just going to go to the next component.
4527 */
4528 goto next;
4529 }
4530
4531 if (c != '.') {
4532 /*
4533 * This is not ".." -- we can just store the
4534 * "/." and this character and continue
4535 * processing.
4536 */
4537 dest[j++] = '/';
4538 dest[j++] = '.';
4539 dest[j++] = c;
4540 continue;
4541 }
4542
4543 c = dtrace_load8(src + i++);
4544
4545 if (c != '/' && c != '\0') {
4546 /*
4547 * This is not ".." -- it's "..[mumble]".
4548 * We'll store the "/.." and this character
4549 * and continue processing.
4550 */
4551 dest[j++] = '/';
4552 dest[j++] = '.';
4553 dest[j++] = '.';
4554 dest[j++] = c;
4555 continue;
4556 }
4557
4558 /*
4559 * This is "/../" or "/..\0". We need to back up
4560 * our destination pointer until we find a "/".
4561 */
4562 i--;
4563 while (j != 0 && dest[--j] != '/')
4564 continue;
4565
4566 if (c == '\0')
4567 dest[++j] = '/';
4568 } while (c != '\0');
4569
4570 dest[j] = '\0';
4571 regs[rd] = (uintptr_t)dest;
4572 mstate->dtms_scratch_ptr += size;
4573 break;
4574 }
4575
4576 case DIF_SUBR_INET_NTOA:
4577 case DIF_SUBR_INET_NTOA6:
4578 case DIF_SUBR_INET_NTOP: {
4579 size_t size;
4580 int af, argi, i;
4581 char *base, *end;
4582
4583 if (subr == DIF_SUBR_INET_NTOP) {
4584 af = (int)tupregs[0].dttk_value;
4585 argi = 1;
4586 } else {
4587 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
4588 argi = 0;
4589 }
4590
4591 if (af == AF_INET) {
4592 ipaddr_t ip4;
4593 uint8_t *ptr8, val;
4594
4595 /*
4596 * Safely load the IPv4 address.
4597 */
4598 ip4 = dtrace_load32(tupregs[argi].dttk_value);
4599
4600 /*
4601 * Check an IPv4 string will fit in scratch.
4602 */
4603 size = INET_ADDRSTRLEN;
4604 if (!DTRACE_INSCRATCH(mstate, size)) {
4605 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4606 regs[rd] = 0;
4607 break;
4608 }
4609 base = (char *)mstate->dtms_scratch_ptr;
4610 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4611
4612 /*
4613 * Stringify as a dotted decimal quad.
4614 */
4615 *end-- = '\0';
4616 ptr8 = (uint8_t *)&ip4;
4617 for (i = 3; i >= 0; i--) {
4618 val = ptr8[i];
4619
4620 if (val == 0) {
4621 *end-- = '0';
4622 } else {
4623 for (; val; val /= 10) {
4624 *end-- = '0' + (val % 10);
4625 }
4626 }
4627
4628 if (i > 0)
4629 *end-- = '.';
4630 }
4631 ASSERT(end + 1 >= base);
4632
4633 } else if (af == AF_INET6) {
4634 struct in6_addr ip6;
4635 int firstzero, tryzero, numzero, v6end;
4636 uint16_t val;
4637 const char digits[] = "0123456789abcdef";
4638
4639 /*
4640 * Stringify using RFC 1884 convention 2 - 16 bit
4641 * hexadecimal values with a zero-run compression.
4642 * Lower case hexadecimal digits are used.
4643 * eg, fe80::214:4fff:fe0b:76c8.
4644 * The IPv4 embedded form is returned for inet_ntop,
4645 * just the IPv4 string is returned for inet_ntoa6.
4646 */
4647
4648 /*
4649 * Safely load the IPv6 address.
4650 */
4651 dtrace_bcopy(
4652 (void *)(uintptr_t)tupregs[argi].dttk_value,
4653 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
4654
4655 /*
4656 * Check an IPv6 string will fit in scratch.
4657 */
4658 size = INET6_ADDRSTRLEN;
4659 if (!DTRACE_INSCRATCH(mstate, size)) {
4660 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4661 regs[rd] = 0;
4662 break;
4663 }
4664 base = (char *)mstate->dtms_scratch_ptr;
4665 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4666 *end-- = '\0';
4667
4668 /*
4669 * Find the longest run of 16 bit zero values
4670 * for the single allowed zero compression - "::".
4671 */
4672 firstzero = -1;
4673 tryzero = -1;
4674 numzero = 1;
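			/*
			 * tryzero marks the start of a candidate zero run
			 * that begins on a 16-bit group boundary. Since
			 * numzero starts at 1, a run must span more than one
			 * byte to be recorded, and a later run displaces the
			 * recorded one only if it is strictly longer -- so
			 * the leftmost longest run wins.
			 */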
4675 for (i = 0; i < sizeof (struct in6_addr); i++) {
4676 #if defined(sun)
4677 if (ip6._S6_un._S6_u8[i] == 0 &&
4678 #else
4679 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4680 #endif
4681 tryzero == -1 && i % 2 == 0) {
4682 tryzero = i;
4683 continue;
4684 }
4685
4686 if (tryzero != -1 &&
4687 #if defined(sun)
4688 (ip6._S6_un._S6_u8[i] != 0 ||
4689 #else
4690 (ip6.__u6_addr.__u6_addr8[i] != 0 ||
4691 #endif
4692 i == sizeof (struct in6_addr) - 1)) {
4693
4694 if (i - tryzero <= numzero) {
4695 tryzero = -1;
4696 continue;
4697 }
4698
4699 firstzero = tryzero;
4700 numzero = i - i % 2 - tryzero;
4701 tryzero = -1;
4702
4703 #if defined(sun)
4704 if (ip6._S6_un._S6_u8[i] == 0 &&
4705 #else
4706 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4707 #endif
4708 i == sizeof (struct in6_addr) - 1)
4709 numzero += 2;
4710 }
4711 }
4712 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4713
4714 /*
4715 * Check for an IPv4 embedded address.
4716 */
4717 v6end = sizeof (struct in6_addr) - 2;
4718 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4719 IN6_IS_ADDR_V4COMPAT(&ip6)) {
4720 for (i = sizeof (struct in6_addr) - 1;
4721 i >= DTRACE_V4MAPPED_OFFSET; i--) {
4722 ASSERT(end >= base);
4723
4724 #if defined(sun)
4725 val = ip6._S6_un._S6_u8[i];
4726 #else
4727 val = ip6.__u6_addr.__u6_addr8[i];
4728 #endif
4729
4730 if (val == 0) {
4731 *end-- = '0';
4732 } else {
4733 for (; val; val /= 10) {
4734 *end-- = '0' + val % 10;
4735 }
4736 }
4737
4738 if (i > DTRACE_V4MAPPED_OFFSET)
4739 *end-- = '.';
4740 }
4741
4742 if (subr == DIF_SUBR_INET_NTOA6)
4743 goto inetout;
4744
4745 /*
4746 * Set v6end to skip the IPv4 address that
4747 * we have already stringified.
4748 */
4749 v6end = 10;
4750 }
4751
4752 /*
4753 * Build the IPv6 string by working through the
4754 * address in reverse.
4755 */
4756 for (i = v6end; i >= 0; i -= 2) {
4757 ASSERT(end >= base);
4758
4759 if (i == firstzero + numzero - 2) {
4760 *end-- = ':';
4761 *end-- = ':';
4762 i -= numzero - 2;
4763 continue;
4764 }
4765
4766 if (i < 14 && i != firstzero - 2)
4767 *end-- = ':';
4768
4769 #if defined(sun)
4770 val = (ip6._S6_un._S6_u8[i] << 8) +
4771 ip6._S6_un._S6_u8[i + 1];
4772 #else
4773 val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
4774 ip6.__u6_addr.__u6_addr8[i + 1];
4775 #endif
4776
4777 if (val == 0) {
4778 *end-- = '0';
4779 } else {
4780 for (; val; val /= 16) {
4781 *end-- = digits[val % 16];
4782 }
4783 }
4784 }
4785 ASSERT(end + 1 >= base);
4786
4787 } else {
4788 /*
			 * The user didn't use AF_INET or AF_INET6.
4790 */
4791 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4792 regs[rd] = 0;
4793 break;
4794 }
4795
4796 inetout: regs[rd] = (uintptr_t)end + 1;
4797 mstate->dtms_scratch_ptr += size;
4798 break;
4799 }
4800
	case DIF_SUBR_MEMREF: {
		uintptr_t size = 2 * sizeof(uintptr_t);
		uintptr_t *memref = (uintptr_t *)P2ROUNDUP(
		    mstate->dtms_scratch_ptr, sizeof(uintptr_t));
		size_t scratch_size = ((uintptr_t)memref -
		    mstate->dtms_scratch_ptr) + size;

		/*
		 * Check that the aligned (address, length) pair fits in the
		 * remaining scratch space before writing to it.
		 */
		if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
			regs[rd] = 0;
			break;
		}

		/* address and length */
		memref[0] = tupregs[0].dttk_value;
		memref[1] = tupregs[1].dttk_value;

		regs[rd] = (uintptr_t)memref;
		mstate->dtms_scratch_ptr += scratch_size;
		break;
	}
4814
	case DIF_SUBR_TYPEREF: {
		uintptr_t size = 4 * sizeof(uintptr_t);
		uintptr_t *typeref = (uintptr_t *)P2ROUNDUP(
		    mstate->dtms_scratch_ptr, sizeof(uintptr_t));
		size_t scratch_size = ((uintptr_t)typeref -
		    mstate->dtms_scratch_ptr) + size;

		/*
		 * Check that the aligned four-word record fits in the
		 * remaining scratch space before writing to it.
		 */
		if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
			regs[rd] = 0;
			break;
		}

		/* address, num_elements, type_str, type_len */
		typeref[0] = tupregs[0].dttk_value;
		typeref[1] = tupregs[1].dttk_value;
		typeref[2] = tupregs[2].dttk_value;
		typeref[3] = tupregs[3].dttk_value;

		regs[rd] = (uintptr_t)typeref;
		mstate->dtms_scratch_ptr += scratch_size;
		break;
	}
4830 }
4831 }
4832
4833 /*
4834 * Emulate the execution of DTrace IR instructions specified by the given
4835 * DIF object. This function is deliberately void of assertions as all of
4836 * the necessary checks are handled by a call to dtrace_difo_validate().
4837 */
4838 static uint64_t
4839 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4840 dtrace_vstate_t *vstate, dtrace_state_t *state)
4841 {
4842 const dif_instr_t *text = difo->dtdo_buf;
4843 const uint_t textlen = difo->dtdo_len;
4844 const char *strtab = difo->dtdo_strtab;
4845 const uint64_t *inttab = difo->dtdo_inttab;
4846
4847 uint64_t rval = 0;
4848 dtrace_statvar_t *svar;
4849 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4850 dtrace_difv_t *v;
4851 volatile uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags;
4852 volatile uintptr_t *illval = &cpu_core[curcpu_id].cpuc_dtrace_illval;
4853
4854 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4855 uint64_t regs[DIF_DIR_NREGS];
4856 uint64_t *tmp;
4857
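	/*
	 * cc_n, cc_z, cc_v and cc_c model processor-style condition codes
	 * (negative, zero, overflow and carry): they are set by the cmp,
	 * tst and scmp opcodes and consumed by the conditional branches.
	 */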
4858 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4859 int64_t cc_r;
4860 uint_t pc = 0, id, opc = 0;
4861 uint8_t ttop = 0;
4862 dif_instr_t instr;
4863 uint_t r1, r2, rd;
4864
4865 /*
4866 * We stash the current DIF object into the machine state: we need it
4867 * for subsequent access checking.
4868 */
4869 mstate->dtms_difo = difo;
4870
4871 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
4872
4873 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4874 opc = pc;
4875
4876 instr = text[pc++];
4877 r1 = DIF_INSTR_R1(instr);
4878 r2 = DIF_INSTR_R2(instr);
4879 rd = DIF_INSTR_RD(instr);
4880
4881 switch (DIF_INSTR_OP(instr)) {
4882 case DIF_OP_OR:
4883 regs[rd] = regs[r1] | regs[r2];
4884 break;
4885 case DIF_OP_XOR:
4886 regs[rd] = regs[r1] ^ regs[r2];
4887 break;
4888 case DIF_OP_AND:
4889 regs[rd] = regs[r1] & regs[r2];
4890 break;
4891 case DIF_OP_SLL:
4892 regs[rd] = regs[r1] << regs[r2];
4893 break;
4894 case DIF_OP_SRL:
4895 regs[rd] = regs[r1] >> regs[r2];
4896 break;
4897 case DIF_OP_SUB:
4898 regs[rd] = regs[r1] - regs[r2];
4899 break;
4900 case DIF_OP_ADD:
4901 regs[rd] = regs[r1] + regs[r2];
4902 break;
4903 case DIF_OP_MUL:
4904 regs[rd] = regs[r1] * regs[r2];
4905 break;
4906 case DIF_OP_SDIV:
4907 if (regs[r2] == 0) {
4908 regs[rd] = 0;
4909 *flags |= CPU_DTRACE_DIVZERO;
4910 } else {
4911 regs[rd] = (int64_t)regs[r1] /
4912 (int64_t)regs[r2];
4913 }
4914 break;
4915
4916 case DIF_OP_UDIV:
4917 if (regs[r2] == 0) {
4918 regs[rd] = 0;
4919 *flags |= CPU_DTRACE_DIVZERO;
4920 } else {
4921 regs[rd] = regs[r1] / regs[r2];
4922 }
4923 break;
4924
4925 case DIF_OP_SREM:
4926 if (regs[r2] == 0) {
4927 regs[rd] = 0;
4928 *flags |= CPU_DTRACE_DIVZERO;
4929 } else {
4930 regs[rd] = (int64_t)regs[r1] %
4931 (int64_t)regs[r2];
4932 }
4933 break;
4934
4935 case DIF_OP_UREM:
4936 if (regs[r2] == 0) {
4937 regs[rd] = 0;
4938 *flags |= CPU_DTRACE_DIVZERO;
4939 } else {
4940 regs[rd] = regs[r1] % regs[r2];
4941 }
4942 break;
4943
4944 case DIF_OP_NOT:
4945 regs[rd] = ~regs[r1];
4946 break;
4947 case DIF_OP_MOV:
4948 regs[rd] = regs[r1];
4949 break;
4950 case DIF_OP_CMP:
4951 cc_r = regs[r1] - regs[r2];
4952 cc_n = cc_r < 0;
4953 cc_z = cc_r == 0;
4954 cc_v = 0;
4955 cc_c = regs[r1] < regs[r2];
4956 break;
4957 case DIF_OP_TST:
4958 cc_n = cc_v = cc_c = 0;
4959 cc_z = regs[r1] == 0;
4960 break;
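		/*
		 * The conditional branches below follow the usual
		 * condition-code idioms: the signed comparisons (BG, BGE,
		 * BL, BLE) test cc_n ^ cc_v, while their unsigned
		 * counterparts (BGU, BGEU, BLU, BLEU) test the carry flag.
		 */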
4961 case DIF_OP_BA:
4962 pc = DIF_INSTR_LABEL(instr);
4963 break;
4964 case DIF_OP_BE:
4965 if (cc_z)
4966 pc = DIF_INSTR_LABEL(instr);
4967 break;
4968 case DIF_OP_BNE:
4969 if (cc_z == 0)
4970 pc = DIF_INSTR_LABEL(instr);
4971 break;
4972 case DIF_OP_BG:
4973 if ((cc_z | (cc_n ^ cc_v)) == 0)
4974 pc = DIF_INSTR_LABEL(instr);
4975 break;
4976 case DIF_OP_BGU:
4977 if ((cc_c | cc_z) == 0)
4978 pc = DIF_INSTR_LABEL(instr);
4979 break;
4980 case DIF_OP_BGE:
4981 if ((cc_n ^ cc_v) == 0)
4982 pc = DIF_INSTR_LABEL(instr);
4983 break;
4984 case DIF_OP_BGEU:
4985 if (cc_c == 0)
4986 pc = DIF_INSTR_LABEL(instr);
4987 break;
4988 case DIF_OP_BL:
4989 if (cc_n ^ cc_v)
4990 pc = DIF_INSTR_LABEL(instr);
4991 break;
4992 case DIF_OP_BLU:
4993 if (cc_c)
4994 pc = DIF_INSTR_LABEL(instr);
4995 break;
4996 case DIF_OP_BLE:
4997 if (cc_z | (cc_n ^ cc_v))
4998 pc = DIF_INSTR_LABEL(instr);
4999 break;
5000 case DIF_OP_BLEU:
5001 if (cc_c | cc_z)
5002 pc = DIF_INSTR_LABEL(instr);
5003 break;
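		/*
		 * The RLD* opcodes are the checked counterparts of the LD*
		 * loads: each verifies that the address lies within
		 * DTrace-controlled memory (as dtrace_canstore() defines
		 * it), falling through to the ordinary load only if the
		 * check passes and faulting with CPU_DTRACE_KPRIV
		 * otherwise.
		 */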
5004 case DIF_OP_RLDSB:
5005 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5006 *flags |= CPU_DTRACE_KPRIV;
5007 *illval = regs[r1];
5008 break;
5009 }
5010 /*FALLTHROUGH*/
5011 case DIF_OP_LDSB:
5012 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5013 break;
5014 case DIF_OP_RLDSH:
5015 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5016 *flags |= CPU_DTRACE_KPRIV;
5017 *illval = regs[r1];
5018 break;
5019 }
5020 /*FALLTHROUGH*/
5021 case DIF_OP_LDSH:
5022 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5023 break;
5024 case DIF_OP_RLDSW:
5025 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5026 *flags |= CPU_DTRACE_KPRIV;
5027 *illval = regs[r1];
5028 break;
5029 }
5030 /*FALLTHROUGH*/
5031 case DIF_OP_LDSW:
5032 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5033 break;
5034 case DIF_OP_RLDUB:
5035 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5036 *flags |= CPU_DTRACE_KPRIV;
5037 *illval = regs[r1];
5038 break;
5039 }
5040 /*FALLTHROUGH*/
5041 case DIF_OP_LDUB:
5042 regs[rd] = dtrace_load8(regs[r1]);
5043 break;
5044 case DIF_OP_RLDUH:
5045 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5046 *flags |= CPU_DTRACE_KPRIV;
5047 *illval = regs[r1];
5048 break;
5049 }
5050 /*FALLTHROUGH*/
5051 case DIF_OP_LDUH:
5052 regs[rd] = dtrace_load16(regs[r1]);
5053 break;
5054 case DIF_OP_RLDUW:
5055 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5056 *flags |= CPU_DTRACE_KPRIV;
5057 *illval = regs[r1];
5058 break;
5059 }
5060 /*FALLTHROUGH*/
5061 case DIF_OP_LDUW:
5062 regs[rd] = dtrace_load32(regs[r1]);
5063 break;
5064 case DIF_OP_RLDX:
5065 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
5066 *flags |= CPU_DTRACE_KPRIV;
5067 *illval = regs[r1];
5068 break;
5069 }
5070 /*FALLTHROUGH*/
5071 case DIF_OP_LDX:
5072 regs[rd] = dtrace_load64(regs[r1]);
5073 break;
5074 case DIF_OP_ULDSB:
5075 regs[rd] = (int8_t)
5076 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5077 break;
5078 case DIF_OP_ULDSH:
5079 regs[rd] = (int16_t)
5080 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5081 break;
5082 case DIF_OP_ULDSW:
5083 regs[rd] = (int32_t)
5084 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5085 break;
5086 case DIF_OP_ULDUB:
5087 regs[rd] =
5088 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5089 break;
5090 case DIF_OP_ULDUH:
5091 regs[rd] =
5092 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5093 break;
5094 case DIF_OP_ULDUW:
5095 regs[rd] =
5096 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5097 break;
5098 case DIF_OP_ULDX:
5099 regs[rd] =
5100 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5101 break;
5102 case DIF_OP_RET:
5103 rval = regs[rd];
5104 pc = textlen;
5105 break;
5106 case DIF_OP_NOP:
5107 break;
5108 case DIF_OP_SETX:
5109 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5110 break;
5111 case DIF_OP_SETS:
5112 regs[rd] = (uint64_t)(uintptr_t)
5113 (strtab + DIF_INSTR_STRING(instr));
5114 break;
5115 case DIF_OP_SCMP: {
5116 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5117 uintptr_t s1 = regs[r1];
5118 uintptr_t s2 = regs[r2];
5119
5120 if (s1 != 0 &&
5121 !dtrace_strcanload(s1, sz, mstate, vstate))
5122 break;
5123 if (s2 != 0 &&
5124 !dtrace_strcanload(s2, sz, mstate, vstate))
5125 break;
5126
5127 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
5128
5129 cc_n = cc_r < 0;
5130 cc_z = cc_r == 0;
5131 cc_v = cc_c = 0;
5132 break;
5133 }
5134 case DIF_OP_LDGA:
5135 regs[rd] = dtrace_dif_variable(mstate, state,
5136 r1, regs[r2]);
5137 break;
5138 case DIF_OP_LDGS:
5139 id = DIF_INSTR_VAR(instr);
5140
5141 if (id >= DIF_VAR_OTHER_UBASE) {
5142 uintptr_t a;
5143
5144 id -= DIF_VAR_OTHER_UBASE;
5145 svar = vstate->dtvs_globals[id];
5146 ASSERT(svar != NULL);
5147 v = &svar->dtsv_var;
5148
5149 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
5150 regs[rd] = svar->dtsv_data;
5151 break;
5152 }
5153
5154 a = (uintptr_t)svar->dtsv_data;
5155
5156 if (*(uint8_t *)a == UINT8_MAX) {
5157 /*
5158 * If the 0th byte is set to UINT8_MAX
5159 * then this is to be treated as a
5160 * reference to a NULL variable.
5161 */
5162 regs[rd] = 0;
5163 } else {
5164 regs[rd] = a + sizeof (uint64_t);
5165 }
5166
5167 break;
5168 }
5169
5170 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
5171 break;
5172
5173 case DIF_OP_STGS:
5174 id = DIF_INSTR_VAR(instr);
5175
5176 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5177 id -= DIF_VAR_OTHER_UBASE;
5178
5179 svar = vstate->dtvs_globals[id];
5180 ASSERT(svar != NULL);
5181 v = &svar->dtsv_var;
5182
5183 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5184 uintptr_t a = (uintptr_t)svar->dtsv_data;
5185
5186 ASSERT(a != 0);
5187 ASSERT(svar->dtsv_size != 0);
5188
5189 if (regs[rd] == 0) {
5190 *(uint8_t *)a = UINT8_MAX;
5191 break;
5192 } else {
5193 *(uint8_t *)a = 0;
5194 a += sizeof (uint64_t);
5195 }
5196 if (!dtrace_vcanload(
5197 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5198 mstate, vstate))
5199 break;
5200
5201 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5202 (void *)a, &v->dtdv_type);
5203 break;
5204 }
5205
5206 svar->dtsv_data = regs[rd];
5207 break;
5208
5209 case DIF_OP_LDTA:
5210 /*
5211 * There are no DTrace built-in thread-local arrays at
5212 * present. This opcode is saved for future work.
5213 */
5214 *flags |= CPU_DTRACE_ILLOP;
5215 regs[rd] = 0;
5216 break;
5217
5218 case DIF_OP_LDLS:
5219 id = DIF_INSTR_VAR(instr);
5220
5221 if (id < DIF_VAR_OTHER_UBASE) {
5222 /*
5223 * For now, this has no meaning.
5224 */
5225 regs[rd] = 0;
5226 break;
5227 }
5228
5229 id -= DIF_VAR_OTHER_UBASE;
5230
5231 ASSERT(id < vstate->dtvs_nlocals);
5232 ASSERT(vstate->dtvs_locals != NULL);
5233
5234 svar = vstate->dtvs_locals[id];
5235 ASSERT(svar != NULL);
5236 v = &svar->dtsv_var;
5237
5238 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5239 uintptr_t a = (uintptr_t)svar->dtsv_data;
5240 size_t sz = v->dtdv_type.dtdt_size;
5241
5242 sz += sizeof (uint64_t);
5243 ASSERT(svar->dtsv_size == NCPU * sz);
5244 a += curcpu_id * sz;
5245
5246 if (*(uint8_t *)a == UINT8_MAX) {
5247 /*
5248 * If the 0th byte is set to UINT8_MAX
5249 * then this is to be treated as a
5250 * reference to a NULL variable.
5251 */
5252 regs[rd] = 0;
5253 } else {
5254 regs[rd] = a + sizeof (uint64_t);
5255 }
5256
5257 break;
5258 }
5259
5260 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5261 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5262 regs[rd] = tmp[curcpu_id];
5263 break;
5264
5265 case DIF_OP_STLS:
5266 id = DIF_INSTR_VAR(instr);
5267
5268 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5269 id -= DIF_VAR_OTHER_UBASE;
5270 ASSERT(id < vstate->dtvs_nlocals);
5271
5272 ASSERT(vstate->dtvs_locals != NULL);
5273 svar = vstate->dtvs_locals[id];
5274 ASSERT(svar != NULL);
5275 v = &svar->dtsv_var;
5276
5277 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5278 uintptr_t a = (uintptr_t)svar->dtsv_data;
5279 size_t sz = v->dtdv_type.dtdt_size;
5280
5281 sz += sizeof (uint64_t);
5282 ASSERT(svar->dtsv_size == NCPU * sz);
5283 a += curcpu_id * sz;
5284
5285 if (regs[rd] == 0) {
5286 *(uint8_t *)a = UINT8_MAX;
5287 break;
5288 } else {
5289 *(uint8_t *)a = 0;
5290 a += sizeof (uint64_t);
5291 }
5292
5293 if (!dtrace_vcanload(
5294 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5295 mstate, vstate))
5296 break;
5297
5298 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5299 (void *)a, &v->dtdv_type);
5300 break;
5301 }
5302
5303 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5304 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5305 tmp[curcpu_id] = regs[rd];
5306 break;
5307
5308 case DIF_OP_LDTS: {
5309 dtrace_dynvar_t *dvar;
5310 dtrace_key_t *key;
5311
5312 id = DIF_INSTR_VAR(instr);
5313 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5314 id -= DIF_VAR_OTHER_UBASE;
5315 v = &vstate->dtvs_tlocals[id];
5316
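			/*
			 * Thread-local variables live in the dynamic
			 * variable space, keyed by the tuple (variable id,
			 * thread identity).
			 */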
5317 key = &tupregs[DIF_DTR_NREGS];
5318 key[0].dttk_value = (uint64_t)id;
5319 key[0].dttk_size = 0;
5320 DTRACE_TLS_THRKEY(key[1].dttk_value);
5321 key[1].dttk_size = 0;
5322
5323 dvar = dtrace_dynvar(dstate, 2, key,
5324 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
5325 mstate, vstate);
5326
5327 if (dvar == NULL) {
5328 regs[rd] = 0;
5329 break;
5330 }
5331
5332 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5333 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5334 } else {
5335 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5336 }
5337
5338 break;
5339 }
5340
5341 case DIF_OP_STTS: {
5342 dtrace_dynvar_t *dvar;
5343 dtrace_key_t *key;
5344
5345 id = DIF_INSTR_VAR(instr);
5346 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5347 id -= DIF_VAR_OTHER_UBASE;
5348
5349 key = &tupregs[DIF_DTR_NREGS];
5350 key[0].dttk_value = (uint64_t)id;
5351 key[0].dttk_size = 0;
5352 DTRACE_TLS_THRKEY(key[1].dttk_value);
5353 key[1].dttk_size = 0;
5354 v = &vstate->dtvs_tlocals[id];
5355
5356 dvar = dtrace_dynvar(dstate, 2, key,
5357 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5358 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5359 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5360 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5361
5362 /*
5363 * Given that we're storing to thread-local data,
5364 * we need to flush our predicate cache.
5365 */
5366 curthread->t_predcache = 0;
5367
5368 if (dvar == NULL)
5369 break;
5370
5371 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5372 if (!dtrace_vcanload(
5373 (void *)(uintptr_t)regs[rd],
5374 &v->dtdv_type, mstate, vstate))
5375 break;
5376
5377 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5378 dvar->dtdv_data, &v->dtdv_type);
5379 } else {
5380 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5381 }
5382
5383 break;
5384 }
5385
5386 case DIF_OP_SRA:
5387 regs[rd] = (int64_t)regs[r1] >> regs[r2];
5388 break;
5389
5390 case DIF_OP_CALL:
5391 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
5392 regs, tupregs, ttop, mstate, state);
5393 break;
5394
5395 case DIF_OP_PUSHTR:
5396 if (ttop == DIF_DTR_NREGS) {
5397 *flags |= CPU_DTRACE_TUPOFLOW;
5398 break;
5399 }
5400
5401 if (r1 == DIF_TYPE_STRING) {
5402 /*
5403 * If this is a string type and the size is 0,
5404 * we'll use the system-wide default string
5405 * size. Note that we are _not_ looking at
5406 * the value of the DTRACEOPT_STRSIZE option;
5407 * had this been set, we would expect to have
5408 * a non-zero size value in the "pushtr".
5409 */
5410 tupregs[ttop].dttk_size =
5411 dtrace_strlen((char *)(uintptr_t)regs[rd],
5412 regs[r2] ? regs[r2] :
5413 dtrace_strsize_default) + 1;
5414 } else {
5415 tupregs[ttop].dttk_size = regs[r2];
5416 }
5417
5418 tupregs[ttop++].dttk_value = regs[rd];
5419 break;
5420
5421 case DIF_OP_PUSHTV:
5422 if (ttop == DIF_DTR_NREGS) {
5423 *flags |= CPU_DTRACE_TUPOFLOW;
5424 break;
5425 }
5426
5427 tupregs[ttop].dttk_value = regs[rd];
5428 tupregs[ttop++].dttk_size = 0;
5429 break;
5430
5431 case DIF_OP_POPTS:
5432 if (ttop != 0)
5433 ttop--;
5434 break;
5435
5436 case DIF_OP_FLUSHTS:
5437 ttop = 0;
5438 break;
5439
5440 case DIF_OP_LDGAA:
5441 case DIF_OP_LDTAA: {
5442 dtrace_dynvar_t *dvar;
5443 dtrace_key_t *key = tupregs;
5444 uint_t nkeys = ttop;
5445
5446 id = DIF_INSTR_VAR(instr);
5447 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5448 id -= DIF_VAR_OTHER_UBASE;
5449
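			/*
			 * The user-supplied tuple is already in tupregs; we
			 * append the variable id (and, for thread-local
			 * associative arrays, the thread key) to form the
			 * full lookup key.
			 */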
5450 key[nkeys].dttk_value = (uint64_t)id;
5451 key[nkeys++].dttk_size = 0;
5452
5453 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
5454 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5455 key[nkeys++].dttk_size = 0;
5456 v = &vstate->dtvs_tlocals[id];
5457 } else {
5458 v = &vstate->dtvs_globals[id]->dtsv_var;
5459 }
5460
5461 dvar = dtrace_dynvar(dstate, nkeys, key,
5462 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5463 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5464 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
5465
5466 if (dvar == NULL) {
5467 regs[rd] = 0;
5468 break;
5469 }
5470
5471 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5472 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5473 } else {
5474 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5475 }
5476
5477 break;
5478 }
5479
5480 case DIF_OP_STGAA:
5481 case DIF_OP_STTAA: {
5482 dtrace_dynvar_t *dvar;
5483 dtrace_key_t *key = tupregs;
5484 uint_t nkeys = ttop;
5485
5486 id = DIF_INSTR_VAR(instr);
5487 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5488 id -= DIF_VAR_OTHER_UBASE;
5489
5490 key[nkeys].dttk_value = (uint64_t)id;
5491 key[nkeys++].dttk_size = 0;
5492
5493 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
5494 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5495 key[nkeys++].dttk_size = 0;
5496 v = &vstate->dtvs_tlocals[id];
5497 } else {
5498 v = &vstate->dtvs_globals[id]->dtsv_var;
5499 }
5500
5501 dvar = dtrace_dynvar(dstate, nkeys, key,
5502 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5503 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5504 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5505 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5506
5507 if (dvar == NULL)
5508 break;
5509
5510 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5511 if (!dtrace_vcanload(
5512 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5513 mstate, vstate))
5514 break;
5515
5516 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5517 dvar->dtdv_data, &v->dtdv_type);
5518 } else {
5519 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5520 }
5521
5522 break;
5523 }
5524
5525 case DIF_OP_ALLOCS: {
5526 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5527 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
5528
5529 /*
5530 * Rounding up the user allocation size could have
5531 * overflowed large, bogus allocations (like -1ULL) to
5532 * 0.
5533 */
5534 if (size < regs[r1] ||
5535 !DTRACE_INSCRATCH(mstate, size)) {
5536 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5537 regs[rd] = 0;
5538 break;
5539 }
5540
5541 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
5542 mstate->dtms_scratch_ptr += size;
5543 regs[rd] = ptr;
5544 break;
5545 }
5546
5547 case DIF_OP_COPYS:
5548 if (!dtrace_canstore(regs[rd], regs[r2],
5549 mstate, vstate)) {
5550 *flags |= CPU_DTRACE_BADADDR;
5551 *illval = regs[rd];
5552 break;
5553 }
5554 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
5555 break;
5556
5557 dtrace_bcopy((void *)(uintptr_t)regs[r1],
5558 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
5559 break;
5560
5561 case DIF_OP_STB:
5562 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
5563 *flags |= CPU_DTRACE_BADADDR;
5564 *illval = regs[rd];
5565 break;
5566 }
5567 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
5568 break;
5569
5570 case DIF_OP_STH:
5571 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
5572 *flags |= CPU_DTRACE_BADADDR;
5573 *illval = regs[rd];
5574 break;
5575 }
5576 if (regs[rd] & 1) {
5577 *flags |= CPU_DTRACE_BADALIGN;
5578 *illval = regs[rd];
5579 break;
5580 }
5581 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
5582 break;
5583
5584 case DIF_OP_STW:
5585 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
5586 *flags |= CPU_DTRACE_BADADDR;
5587 *illval = regs[rd];
5588 break;
5589 }
5590 if (regs[rd] & 3) {
5591 *flags |= CPU_DTRACE_BADALIGN;
5592 *illval = regs[rd];
5593 break;
5594 }
5595 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
5596 break;
5597
5598 case DIF_OP_STX:
5599 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
5600 *flags |= CPU_DTRACE_BADADDR;
5601 *illval = regs[rd];
5602 break;
5603 }
5604 if (regs[rd] & 7) {
5605 *flags |= CPU_DTRACE_BADALIGN;
5606 *illval = regs[rd];
5607 break;
5608 }
5609 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
5610 break;
5611 }
5612 }
5613
5614 if (!(*flags & CPU_DTRACE_FAULT))
5615 return (rval);
5616
5617 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
5618 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
5619
5620 return (0);
5621 }
5622
5623 static void
5624 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
5625 {
5626 dtrace_probe_t *probe = ecb->dte_probe;
5627 dtrace_provider_t *prov = probe->dtpr_provider;
5628 char c[DTRACE_FULLNAMELEN + 80], *str;
5629 const char *msg = "dtrace: breakpoint action at probe ";
5630 const char *ecbmsg = " (ecb ";
	uintptr_t mask;
5632 uintptr_t val = (uintptr_t)ecb;
5633 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
5634
5635 if (dtrace_destructive_disallow)
5636 return;
5637
5638 /*
5639 * It's impossible to be taking action on the NULL probe.
5640 */
5641 ASSERT(probe != NULL);
5642
5643 /*
5644 * This is a poor man's (destitute man's?) snprintf(): we want to
5645 * print the provider name, module name, function name and name of
5646 * the probe, along with the hex address of the ECB with the breakpoint
5647 * action -- all of which we must place in the character buffer by
5648 * hand.
5649 */
5650 while (*msg != '\0')
5651 c[i++] = *msg++;
5652
5653 for (str = prov->dtpv_name; *str != '\0'; str++)
5654 c[i++] = *str;
5655 c[i++] = ':';
5656
5657 for (str = probe->dtpr_mod; *str != '\0'; str++)
5658 c[i++] = *str;
5659 c[i++] = ':';
5660
5661 for (str = probe->dtpr_func; *str != '\0'; str++)
5662 c[i++] = *str;
5663 c[i++] = ':';
5664
5665 for (str = probe->dtpr_name; *str != '\0'; str++)
5666 c[i++] = *str;
5667
5668 while (*ecbmsg != '\0')
5669 c[i++] = *ecbmsg++;
5670
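	/*
	 * Render the ECB address in hex, one nibble at a time from most
	 * significant to least, suppressing leading zeroes.
	 */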
5671 while (shift >= 0) {
5672 mask = (uintptr_t)0xf << shift;
5673
5674 if (val >= ((uintptr_t)1 << shift))
5675 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5676 shift -= 4;
5677 }
5678
5679 c[i++] = ')';
5680 c[i] = '\0';
5681
5682 #if defined(sun)
5683 debug_enter(c);
5684 #else
5685 #ifdef DDB
5686 db_printf("%s\n", c);
5687 Debugger();
5688 #else
5689 printf("%s ignored\n", c);
5690 #endif /* DDB */
5691 #endif
5692 }
5693
5694 static void
5695 dtrace_action_panic(dtrace_ecb_t *ecb)
5696 {
5697 dtrace_probe_t *probe = ecb->dte_probe;
5698
5699 /*
5700 * It's impossible to be taking action on the NULL probe.
5701 */
5702 ASSERT(probe != NULL);
5703
5704 if (dtrace_destructive_disallow)
5705 return;
5706
5707 if (dtrace_panicked != NULL)
5708 return;
5709
5710 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
5711 return;
5712
5713 /*
5714 * We won the right to panic. (We want to be sure that only one
5715 * thread calls panic() from dtrace_probe(), and that panic() is
5716 * called exactly once.)
5717 */
5718 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
5719 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
5720 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
5721 }
5722
5723 static void
5724 dtrace_action_raise(uint64_t sig)
5725 {
5726 if (dtrace_destructive_disallow)
5727 return;
5728
5729 if (sig >= NSIG) {
5730 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5731 return;
5732 }
5733
5734 #if defined(sun)
5735 /*
5736 * raise() has a queue depth of 1 -- we ignore all subsequent
5737 * invocations of the raise() action.
5738 */
5739 if (curthread->t_dtrace_sig == 0)
5740 curthread->t_dtrace_sig = (uint8_t)sig;
5741
5742 curthread->t_sig_check = 1;
5743 aston(curthread);
5744 #else
5745 struct proc *p = curproc;
5746 mutex_enter(proc_lock);
5747 psignal(p, sig);
5748 mutex_exit(proc_lock);
5749 #endif
5750 }
5751
5752 static void
5753 dtrace_action_stop(void)
5754 {
5755 if (dtrace_destructive_disallow)
5756 return;
5757
5758 #if defined(sun)
5759 if (!curthread->t_dtrace_stop) {
5760 curthread->t_dtrace_stop = 1;
5761 curthread->t_sig_check = 1;
5762 aston(curthread);
5763 }
5764 #else
5765 struct proc *p = curproc;
5766 mutex_enter(proc_lock);
5767 psignal(p, SIGSTOP);
5768 mutex_exit(proc_lock);
5769 #endif
5770 }
5771
5772 static void
5773 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5774 {
5775 #if 0 /* XXX TBD - needs solaris_cpu */
5776 hrtime_t now;
5777 volatile uint16_t *flags;
5778 #if defined(sun)
5779 cpu_t *cpu = CPU;
5780 #else
5781 cpu_t *cpu = &solaris_cpu[curcpu_id];
5782 #endif
5783
5784 if (dtrace_destructive_disallow)
5785 return;
5786
5787 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5788
5789 now = dtrace_gethrtime();
5790
5791 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5792 /*
5793 * We need to advance the mark to the current time.
5794 */
5795 cpu->cpu_dtrace_chillmark = now;
5796 cpu->cpu_dtrace_chilled = 0;
5797 }
5798
5799 /*
5800 * Now check to see if the requested chill time would take us over
5801 * the maximum amount of time allowed in the chill interval. (Or
5802 * worse, if the calculation itself induces overflow.)
5803 */
5804 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5805 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5806 *flags |= CPU_DTRACE_ILLOP;
5807 return;
5808 }
5809
5810 while (dtrace_gethrtime() - now < val)
5811 continue;
5812
5813 /*
5814 * Normally, we assure that the value of the variable "timestamp" does
5815 * not change within an ECB. The presence of chill() represents an
5816 * exception to this rule, however.
5817 */
5818 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5819 cpu->cpu_dtrace_chilled += val;
5820 #endif
5821 }
5822
5823 static void
5824 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5825 uint64_t *buf, uint64_t arg)
5826 {
5827 int nframes = DTRACE_USTACK_NFRAMES(arg);
5828 int strsize = DTRACE_USTACK_STRSIZE(arg);
5829 uint64_t *pcs = &buf[1], *fps;
5830 char *str = (char *)&pcs[nframes];
5831 int size, offs = 0, i, j;
5832 uintptr_t old = mstate->dtms_scratch_ptr, saved;
5833 uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags;
5834 char *sym;
5835
5836 /*
5837 * Should be taking a faster path if string space has not been
5838 * allocated.
5839 */
5840 ASSERT(strsize != 0);
5841
5842 /*
5843 * We will first allocate some temporary space for the frame pointers.
5844 */
5845 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5846 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5847 (nframes * sizeof (uint64_t));
5848
5849 if (!DTRACE_INSCRATCH(mstate, size)) {
5850 /*
5851 * Not enough room for our frame pointers -- need to indicate
5852 * that we ran out of scratch space.
5853 */
5854 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5855 return;
5856 }
5857
5858 mstate->dtms_scratch_ptr += size;
5859 saved = mstate->dtms_scratch_ptr;
5860
5861 /*
5862 * Now get a stack with both program counters and frame pointers.
5863 */
5864 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5865 dtrace_getufpstack(buf, fps, nframes + 1);
5866 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5867
5868 /*
5869 * If that faulted, we're cooked.
5870 */
5871 if (*flags & CPU_DTRACE_FAULT)
5872 goto out;
5873
5874 /*
5875 * Now we want to walk up the stack, calling the USTACK helper. For
5876 * each iteration, we restore the scratch pointer.
5877 */
5878 for (i = 0; i < nframes; i++) {
5879 mstate->dtms_scratch_ptr = saved;
5880
5881 if (offs >= strsize)
5882 break;
5883
5884 sym = (char *)(uintptr_t)dtrace_helper(
5885 DTRACE_HELPER_ACTION_USTACK,
5886 mstate, state, pcs[i], fps[i]);
5887
5888 /*
5889 * If we faulted while running the helper, we're going to
5890 * clear the fault and null out the corresponding string.
5891 */
5892 if (*flags & CPU_DTRACE_FAULT) {
5893 *flags &= ~CPU_DTRACE_FAULT;
5894 str[offs++] = '\0';
5895 continue;
5896 }
5897
5898 if (sym == NULL) {
5899 str[offs++] = '\0';
5900 continue;
5901 }
5902
5903 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5904
5905 /*
5906 * Now copy in the string that the helper returned to us.
5907 */
5908 for (j = 0; offs + j < strsize; j++) {
5909 if ((str[offs + j] = sym[j]) == '\0')
5910 break;
5911 }
5912
5913 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5914
5915 offs += j + 1;
5916 }
5917
5918 if (offs >= strsize) {
5919 /*
5920 * If we didn't have room for all of the strings, we don't
5921 * abort processing -- this needn't be a fatal error -- but we
5922 * still want to increment a counter (dts_stkstroverflows) to
5923 * allow this condition to be warned about. (If this is from
5924 * a jstack() action, it is easily tuned via jstackstrsize.)
5925 */
5926 dtrace_error(&state->dts_stkstroverflows);
5927 }
5928
5929 while (offs < strsize)
5930 str[offs++] = '\0';
5931
5932 out:
5933 mstate->dtms_scratch_ptr = old;
5934 }
5935
5936 /*
5937 * If you're looking for the epicenter of DTrace, you just found it. This
5938 * is the function called by the provider to fire a probe -- from which all
5939 * subsequent probe-context DTrace activity emanates.
5940 */
5941 void
5942 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
5943 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
5944 {
5945 processorid_t cpuid;
5946 dtrace_icookie_t cookie;
5947 dtrace_probe_t *probe;
5948 dtrace_mstate_t mstate;
5949 dtrace_ecb_t *ecb;
5950 dtrace_action_t *act;
5951 intptr_t offs;
5952 size_t size;
5953 int vtime, onintr;
5954 volatile uint16_t *flags;
5955 hrtime_t now;
5956
5957 #if defined(sun)
5958 /*
5959 * Kick out immediately if this CPU is still being born (in which case
5960 * curthread will be set to -1) or the current thread can't allow
5961 * probes in its current context.
5962 */
5963 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
5964 return;
5965 #endif
5966
5967 cookie = dtrace_interrupt_disable();
5968 probe = dtrace_probes[id - 1];
5969 cpuid = curcpu_id;
5970 onintr = CPU_ON_INTR(CPU);
5971
5972 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
5973 probe->dtpr_predcache == curthread->t_predcache) {
5974 /*
5975 * We have hit in the predicate cache; we know that
5976 * this predicate would evaluate to be false.
5977 */
5978 dtrace_interrupt_enable(cookie);
5979 return;
5980 }
5981
5982 #if defined(sun)
5983 if (panic_quiesce) {
5984 #else
5985 if (panicstr != NULL) {
5986 #endif
5987 /*
5988 * We don't trace anything if we're panicking.
5989 */
5990 dtrace_interrupt_enable(cookie);
5991 return;
5992 }
5993
5994 now = dtrace_gethrtime();
5995 vtime = dtrace_vtime_references != 0;
5996
5997 if (vtime && curthread->t_dtrace_start)
5998 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
5999
6000 mstate.dtms_difo = NULL;
6001 mstate.dtms_probe = probe;
6002 mstate.dtms_strtok = 0;
6003 mstate.dtms_arg[0] = arg0;
6004 mstate.dtms_arg[1] = arg1;
6005 mstate.dtms_arg[2] = arg2;
6006 mstate.dtms_arg[3] = arg3;
6007 mstate.dtms_arg[4] = arg4;
6008
6009 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
6010
6011 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
6012 dtrace_predicate_t *pred = ecb->dte_predicate;
6013 dtrace_state_t *state = ecb->dte_state;
6014 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
6015 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
6016 dtrace_vstate_t *vstate = &state->dts_vstate;
6017 dtrace_provider_t *prov = probe->dtpr_provider;
6018 int committed = 0;
6019 caddr_t tomax;
6020
6021 /*
6022 * A little subtlety with the following (seemingly innocuous)
6023 * declaration of the automatic 'val': by looking at the
6024 * code, you might think that it could be declared in the
6025 * action processing loop, below. (That is, it's only used in
6026 * the action processing loop.) However, it must be declared
6027 * out of that scope because in the case of DIF expression
6028 * arguments to aggregating actions, one iteration of the
6029 * action loop will use the last iteration's value.
6030 */
6031 uint64_t val = 0;
6032
6033 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
6034 *flags &= ~CPU_DTRACE_ERROR;
6035
6036 if (prov == dtrace_provider) {
6037 /*
6038 * If dtrace itself is the provider of this probe,
6039 * we're only going to continue processing the ECB if
6040 * arg0 (the dtrace_state_t) is equal to the ECB's
6041 * creating state. (This prevents disjoint consumers
6042 * from seeing one another's metaprobes.)
6043 */
6044 if (arg0 != (uint64_t)(uintptr_t)state)
6045 continue;
6046 }
6047
6048 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
6049 /*
6050 * We're not currently active. If our provider isn't
6051 * the dtrace pseudo provider, we're not interested.
6052 */
6053 if (prov != dtrace_provider)
6054 continue;
6055
6056 /*
6057 * Now we must further check if we are in the BEGIN
6058 * probe. If we are, we will only continue processing
6059 * if we're still in WARMUP -- if one BEGIN enabling
6060 * has invoked the exit() action, we don't want to
6061 * evaluate subsequent BEGIN enablings.
6062 */
6063 if (probe->dtpr_id == dtrace_probeid_begin &&
6064 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
6065 ASSERT(state->dts_activity ==
6066 DTRACE_ACTIVITY_DRAINING);
6067 continue;
6068 }
6069 }
6070
6071 if (ecb->dte_cond) {
6072 /*
6073 * If the dte_cond bits indicate that this
6074 * consumer is only allowed to see user-mode firings
6075 * of this probe, call the provider's dtps_usermode()
6076 * entry point to check that the probe was fired
6077 * while in a user context. Skip this ECB if that's
6078 * not the case.
6079 */
6080 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
6081 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
6082 probe->dtpr_id, probe->dtpr_arg) == 0)
6083 continue;
6084
6085 #if defined(sun)
6086 /*
6087 * This is more subtle than it looks. We have to be
6088 * absolutely certain that CRED() isn't going to
6089 * change out from under us so it's only legit to
6090 * examine that structure if we're in constrained
6091 * situations. Currently, the only time we'll do this
6092 * check is if a non-super-user has enabled the
6093 * profile or syscall providers -- providers that
6094 * allow visibility of all processes. For the
6095 * profile case, the check above will ensure that
6096 * we're examining a user context.
6097 */
6098 if (ecb->dte_cond & DTRACE_COND_OWNER) {
6099 cred_t *cr;
6100 cred_t *s_cr =
6101 ecb->dte_state->dts_cred.dcr_cred;
6102 proc_t *proc;
6103
6104 ASSERT(s_cr != NULL);
6105
6106 if ((cr = CRED()) == NULL ||
6107 s_cr->cr_uid != cr->cr_uid ||
6108 s_cr->cr_uid != cr->cr_ruid ||
6109 s_cr->cr_uid != cr->cr_suid ||
6110 s_cr->cr_gid != cr->cr_gid ||
6111 s_cr->cr_gid != cr->cr_rgid ||
6112 s_cr->cr_gid != cr->cr_sgid ||
6113 (proc = ttoproc(curthread)) == NULL ||
6114 (proc->p_flag & SNOCD))
6115 continue;
6116 }
6117
6118 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
6119 cred_t *cr;
6120 cred_t *s_cr =
6121 ecb->dte_state->dts_cred.dcr_cred;
6122
6123 ASSERT(s_cr != NULL);
6124
6125 if ((cr = CRED()) == NULL ||
6126 s_cr->cr_zone->zone_id !=
6127 cr->cr_zone->zone_id)
6128 continue;
6129 }
6130 #endif
6131 }
6132
6133 if (now - state->dts_alive > dtrace_deadman_timeout) {
6134 /*
6135 * We seem to be dead. Unless we (a) have kernel
6136 * destructive permissions, (b) have explicitly enabled
6137 * destructive actions, and (c) destructive actions have
6138 * not been disabled, we're going to transition into
6139 * the KILLED state, from which no further processing
6140 * on this state will be performed.
6141 */
6142 if (!dtrace_priv_kernel_destructive(state) ||
6143 !state->dts_cred.dcr_destructive ||
6144 dtrace_destructive_disallow) {
6145 void *activity = &state->dts_activity;
6146 dtrace_activity_t current;
6147
6148 do {
6149 current = state->dts_activity;
6150 } while (dtrace_cas32(activity, current,
6151 DTRACE_ACTIVITY_KILLED) != current);
6152
6153 continue;
6154 }
6155 }
6156
6157 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6158 ecb->dte_alignment, state, &mstate)) < 0)
6159 continue;
6160
6161 tomax = buf->dtb_tomax;
6162 ASSERT(tomax != NULL);
6163
6164 if (ecb->dte_size != 0)
6165 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
6166
6167 mstate.dtms_epid = ecb->dte_epid;
6168 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6169
6170 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6171 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
6172 else
6173 mstate.dtms_access = 0;
6174
6175 if (pred != NULL) {
6176 dtrace_difo_t *dp = pred->dtp_difo;
6177 int rval;
6178
6179 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6180
6181 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6182 dtrace_cacheid_t cid = probe->dtpr_predcache;
6183
6184 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6185 /*
6186 * Update the predicate cache...
6187 */
6188 ASSERT(cid == pred->dtp_cacheid);
6189 curthread->t_predcache = cid;
6190 }
6191
6192 continue;
6193 }
6194 }
6195
6196 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
6197 act != NULL; act = act->dta_next) {
6198 size_t valoffs;
6199 dtrace_difo_t *dp;
6200 dtrace_recdesc_t *rec = &act->dta_rec;
6201
6202 size = rec->dtrd_size;
6203 valoffs = offs + rec->dtrd_offset;
6204
6205 if (DTRACEACT_ISAGG(act->dta_kind)) {
6206 uint64_t v = 0xbad;
6207 dtrace_aggregation_t *agg;
6208
6209 agg = (dtrace_aggregation_t *)act;
6210
6211 if ((dp = act->dta_difo) != NULL)
6212 v = dtrace_dif_emulate(dp,
6213 &mstate, vstate, state);
6214
6215 if (*flags & CPU_DTRACE_ERROR)
6216 continue;
6217
6218 /*
6219 * Note that we always pass the expression
6220 * value from the previous iteration of the
6221 * action loop. This value will only be used
6222 * if there is an expression argument to the
6223 * aggregating action, denoted by the
6224 * dtag_hasarg field.
6225 */
6226 dtrace_aggregate(agg, buf,
6227 offs, aggbuf, v, val);
6228 continue;
6229 }
6230
6231 switch (act->dta_kind) {
6232 case DTRACEACT_STOP:
6233 if (dtrace_priv_proc_destructive(state))
6234 dtrace_action_stop();
6235 continue;
6236
6237 case DTRACEACT_BREAKPOINT:
6238 if (dtrace_priv_kernel_destructive(state))
6239 dtrace_action_breakpoint(ecb);
6240 continue;
6241
6242 case DTRACEACT_PANIC:
6243 if (dtrace_priv_kernel_destructive(state))
6244 dtrace_action_panic(ecb);
6245 continue;
6246
6247 case DTRACEACT_STACK:
6248 if (!dtrace_priv_kernel(state))
6249 continue;
6250
6251 dtrace_getpcstack((pc_t *)(tomax + valoffs),
6252 size / sizeof (pc_t), probe->dtpr_aframes,
6253 DTRACE_ANCHORED(probe) ? NULL :
6254 (uint32_t *)arg0);
6255 continue;
6256
6257 case DTRACEACT_JSTACK:
6258 case DTRACEACT_USTACK:
6259 if (!dtrace_priv_proc(state))
6260 continue;
6261
6262 /*
6263 * See comment in DIF_VAR_PID.
6264 */
6265 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
6266 CPU_ON_INTR(CPU)) {
6267 int depth = DTRACE_USTACK_NFRAMES(
6268 rec->dtrd_arg) + 1;
6269
6270 dtrace_bzero((void *)(tomax + valoffs),
6271 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
6272 + depth * sizeof (uint64_t));
6273
6274 continue;
6275 }
6276
6277 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
6278 curproc->p_dtrace_helpers != NULL) {
6279 /*
6280 * This is the slow path -- we have
6281 * allocated string space, and we're
6282 * getting the stack of a process that
6283 * has helpers. Call into a separate
6284 * routine to perform this processing.
6285 */
6286 dtrace_action_ustack(&mstate, state,
6287 (uint64_t *)(tomax + valoffs),
6288 rec->dtrd_arg);
6289 continue;
6290 }
6291
6292 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6293 dtrace_getupcstack((uint64_t *)
6294 (tomax + valoffs),
6295 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6296 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6297 continue;
6298
6299 default:
6300 break;
6301 }
6302
6303 dp = act->dta_difo;
6304 ASSERT(dp != NULL);
6305
6306 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6307
6308 if (*flags & CPU_DTRACE_ERROR)
6309 continue;
6310
6311 switch (act->dta_kind) {
6312 case DTRACEACT_SPECULATE:
6313 ASSERT(buf == &state->dts_buffer[cpuid]);
6314 buf = dtrace_speculation_buffer(state,
6315 cpuid, val);
6316
6317 if (buf == NULL) {
6318 *flags |= CPU_DTRACE_DROP;
6319 continue;
6320 }
6321
6322 offs = dtrace_buffer_reserve(buf,
6323 ecb->dte_needed, ecb->dte_alignment,
6324 state, NULL);
6325
6326 if (offs < 0) {
6327 *flags |= CPU_DTRACE_DROP;
6328 continue;
6329 }
6330
6331 tomax = buf->dtb_tomax;
6332 ASSERT(tomax != NULL);
6333
6334 if (ecb->dte_size != 0)
6335 DTRACE_STORE(uint32_t, tomax, offs,
6336 ecb->dte_epid);
6337 continue;
6338
6339 case DTRACEACT_PRINTM: {
6340 /* The DIF returns a 'memref'. */
6341 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
6342
6343 /* Get the size from the memref. */
6344 size = memref[1];
6345
6346 /*
6347 * Check if the size exceeds the allocated
6348 * buffer size.
6349 */
6350 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6351 /* Flag a drop! */
6352 *flags |= CPU_DTRACE_DROP;
6353 continue;
6354 }
6355
6356 /* Store the size in the buffer first. */
6357 DTRACE_STORE(uintptr_t, tomax,
6358 valoffs, size);
6359
6360 /*
6361 * Offset the buffer address to the start
6362 * of the data.
6363 */
6364 valoffs += sizeof(uintptr_t);
6365
6366 /*
6367 * Reset to the memory address rather than
6368 * the memref array, then let the BYREF
6369 * code below do the work to store the
6370 * memory data in the buffer.
6371 */
6372 val = memref[0];
6373 break;
6374 }
6375
6376 case DTRACEACT_PRINTT: {
6377 /* The DIF returns a 'typeref'. */
6378 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val;
6379 char c = '\0' + 1;
6380 size_t s;
6381
6382 /*
6383 * Get the type string length and round it
6384 * up so that the data that follows is
6385 * aligned for easy access.
6386 */
6387 size_t typs = strlen((char *) typeref[2]) + 1;
6388 typs = roundup(typs, sizeof(uintptr_t));
6389
6390 /*
6391 * Get the size from the typeref using the
6392 * number of elements and the type size.
6393 */
6394 size = typeref[1] * typeref[3];
6395
6396 /*
6397 * Check if the size exceeds the allocated
6398 * buffer size.
6399 */
6400 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6401 /* Flag a drop! */
6402 *flags |= CPU_DTRACE_DROP;
6403 continue;
6404 }
6405
6406 /* Store the size in the buffer first. */
6407 DTRACE_STORE(uintptr_t, tomax,
6408 valoffs, size);
6409 valoffs += sizeof(uintptr_t);
6410
6411 /* Store the type size in the buffer. */
6412 DTRACE_STORE(uintptr_t, tomax,
6413 valoffs, typeref[3]);
6414 valoffs += sizeof(uintptr_t);
6415
6416 val = typeref[2];
6417
6418 for (s = 0; s < typs; s++) {
6419 if (c != '\0')
6420 c = dtrace_load8(val++);
6421
6422 DTRACE_STORE(uint8_t, tomax,
6423 valoffs++, c);
6424 }
6425
6426 /*
6427 * Reset to the memory address rather than
6428 * the typeref array, then let the BYREF
6429 * code below do the work to store the
6430 * memory data in the buffer.
6431 */
6432 val = typeref[0];
6433 break;
6434 }
6435
6436 case DTRACEACT_CHILL:
6437 if (dtrace_priv_kernel_destructive(state))
6438 dtrace_action_chill(&mstate, val);
6439 continue;
6440
6441 case DTRACEACT_RAISE:
6442 if (dtrace_priv_proc_destructive(state))
6443 dtrace_action_raise(val);
6444 continue;
6445
6446 case DTRACEACT_COMMIT:
6447 ASSERT(!committed);
6448
6449 /*
6450 * We need to commit our buffer state.
6451 */
6452 if (ecb->dte_size)
6453 buf->dtb_offset = offs + ecb->dte_size;
6454 buf = &state->dts_buffer[cpuid];
6455 dtrace_speculation_commit(state, cpuid, val);
6456 committed = 1;
6457 continue;
6458
6459 case DTRACEACT_DISCARD:
6460 dtrace_speculation_discard(state, cpuid, val);
6461 continue;
6462
6463 case DTRACEACT_DIFEXPR:
6464 case DTRACEACT_LIBACT:
6465 case DTRACEACT_PRINTF:
6466 case DTRACEACT_PRINTA:
6467 case DTRACEACT_SYSTEM:
6468 case DTRACEACT_FREOPEN:
6469 break;
6470
6471 case DTRACEACT_SYM:
6472 case DTRACEACT_MOD:
6473 if (!dtrace_priv_kernel(state))
6474 continue;
6475 break;
6476
6477 case DTRACEACT_USYM:
6478 case DTRACEACT_UMOD:
6479 case DTRACEACT_UADDR: {
6480 #if defined(sun)
6481 struct pid *pid = curthread->t_procp->p_pidp;
6482 #endif
6483 if (!dtrace_priv_proc(state))
6484 continue;
6485
6486 DTRACE_STORE(uint64_t, tomax,
6487 #if defined(sun)
6488 valoffs, (uint64_t)pid->pid_id);
6489 #else
6490 valoffs, (uint64_t) curproc->p_pid);
6491 #endif
6492 DTRACE_STORE(uint64_t, tomax,
6493 valoffs + sizeof (uint64_t), val);
6494
6495 continue;
6496 }
6497
6498 case DTRACEACT_EXIT: {
6499 /*
6500 * For the exit action, we are going to attempt
6501 * to atomically set our activity to be
6502 * draining. If this fails (either because
6503 * another CPU has beat us to the exit action,
6504 * or because our current activity is something
6505 * other than ACTIVE or WARMUP), we will
6506 * continue. This assures that the exit action
6507 * can be successfully recorded at most once
6508 * when we're in the ACTIVE state. If we're
6509 * encountering the exit() action while in
6510 * COOLDOWN, however, we want to honor the new
6511 * status code. (We know that we're the only
6512 * thread in COOLDOWN, so there is no race.)
6513 */
6514 void *activity = &state->dts_activity;
6515 dtrace_activity_t current = state->dts_activity;
6516
6517 if (current == DTRACE_ACTIVITY_COOLDOWN)
6518 break;
6519
6520 if (current != DTRACE_ACTIVITY_WARMUP)
6521 current = DTRACE_ACTIVITY_ACTIVE;
6522
6523 if (dtrace_cas32(activity, current,
6524 DTRACE_ACTIVITY_DRAINING) != current) {
6525 *flags |= CPU_DTRACE_DROP;
6526 continue;
6527 }
6528
6529 break;
6530 }
6531
6532 default:
6533 ASSERT(0);
6534 }
6535
6536 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
6537 uintptr_t end = valoffs + size;
6538
6539 if (!dtrace_vcanload((void *)(uintptr_t)val,
6540 &dp->dtdo_rtype, &mstate, vstate))
6541 continue;
6542
6543 /*
6544 * If this is a string, we're going to only
6545 * load until we find the zero byte -- after
6546 * which we'll store zero bytes.
6547 */
6548 if (dp->dtdo_rtype.dtdt_kind ==
6549 DIF_TYPE_STRING) {
6550 char c = '\0' + 1;
6551 int intuple = act->dta_intuple;
6552 size_t s;
6553
6554 for (s = 0; s < size; s++) {
6555 if (c != '\0')
6556 c = dtrace_load8(val++);
6557
6558 DTRACE_STORE(uint8_t, tomax,
6559 valoffs++, c);
6560
6561 if (c == '\0' && intuple)
6562 break;
6563 }
6564
6565 continue;
6566 }
6567
6568 while (valoffs < end) {
6569 DTRACE_STORE(uint8_t, tomax, valoffs++,
6570 dtrace_load8(val++));
6571 }
6572
6573 continue;
6574 }
6575
6576 switch (size) {
6577 case 0:
6578 break;
6579
6580 case sizeof (uint8_t):
6581 DTRACE_STORE(uint8_t, tomax, valoffs, val);
6582 break;
6583 case sizeof (uint16_t):
6584 DTRACE_STORE(uint16_t, tomax, valoffs, val);
6585 break;
6586 case sizeof (uint32_t):
6587 DTRACE_STORE(uint32_t, tomax, valoffs, val);
6588 break;
6589 case sizeof (uint64_t):
6590 DTRACE_STORE(uint64_t, tomax, valoffs, val);
6591 break;
6592 default:
6593 /*
6594 * Any other size should have been returned by
6595 * reference, not by value.
6596 */
6597 ASSERT(0);
6598 break;
6599 }
6600 }
6601
6602 if (*flags & CPU_DTRACE_DROP)
6603 continue;
6604
6605 if (*flags & CPU_DTRACE_FAULT) {
6606 int ndx;
6607 dtrace_action_t *err;
6608
6609 buf->dtb_errors++;
6610
6611 if (probe->dtpr_id == dtrace_probeid_error) {
6612 /*
6613 * There's nothing we can do -- we had an
6614 * error on the error probe. We bump an
6615 * error counter to at least indicate that
6616 * this condition happened.
6617 */
6618 dtrace_error(&state->dts_dblerrors);
6619 continue;
6620 }
6621
6622 if (vtime) {
6623 /*
6624 * Before recursing on dtrace_probe(), we
6625 * need to explicitly clear out our start
6626 * time to prevent it from being accumulated
6627 * into t_dtrace_vtime.
6628 */
6629 curthread->t_dtrace_start = 0;
6630 }
6631
6632 /*
6633 * Iterate over the actions to figure out which action
6634 * we were processing when we experienced the error.
6635 * Note that act points _past_ the faulting action; if
6636 * act is ecb->dte_action, the fault was in the
6637 * predicate, if it's ecb->dte_action->dta_next it's
6638 * in action #1, and so on.
6639 */
6640 for (err = ecb->dte_action, ndx = 0;
6641 err != act; err = err->dta_next, ndx++)
6642 continue;
6643
6644 dtrace_probe_error(state, ecb->dte_epid, ndx,
6645 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
6646 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6647 cpu_core[cpuid].cpuc_dtrace_illval);
6648
6649 continue;
6650 }
6651
6652 if (!committed)
6653 buf->dtb_offset = offs + ecb->dte_size;
6654 }
6655
6656 if (vtime)
6657 curthread->t_dtrace_start = dtrace_gethrtime();
6658
6659 dtrace_interrupt_enable(cookie);
6660 }
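
/*
 * An illustrative call from a provider's instrumentation point, passing
 * the probe ID obtained at creation time and up to five arguments:
 *
 *	dtrace_probe(id, arg0, arg1, arg2, arg3, arg4);
 */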
6661
6662 /*
6663 * DTrace Probe Hashing Functions
6664 *
6665 * The functions in this section (and indeed, the functions in remaining
6666 * sections) are not _called_ from probe context. (Any exceptions to this are
6667 * marked with a "Note:".) Rather, they are called from elsewhere in the
6668 * DTrace framework to look up probes in, add probes to, and remove probes from
6669 * the DTrace probe hashes. (Each probe is hashed by each element of the
6670 * probe tuple -- allowing for fast lookups, regardless of what was
6671 * specified.)
6672 */
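/*
 * Hash a probe-component string with a classic PJW-style shift-and-XOR
 * hash: bits that reach the top nibble are folded back into the
 * low-order bits and then cleared.
 */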
6673 static uint_t
6674 dtrace_hash_str(const char *p)
6675 {
6676 unsigned int g;
6677 uint_t hval = 0;
6678
6679 while (*p) {
6680 hval = (hval << 4) + *p++;
6681 if ((g = (hval & 0xf0000000)) != 0)
6682 hval ^= g >> 24;
6683 hval &= ~g;
6684 }
6685 return (hval);
6686 }
6687
6688 static dtrace_hash_t *
6689 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6690 {
6691 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6692
6693 hash->dth_stroffs = stroffs;
6694 hash->dth_nextoffs = nextoffs;
6695 hash->dth_prevoffs = prevoffs;
6696
6697 hash->dth_size = 1;
6698 hash->dth_mask = hash->dth_size - 1;
6699
6700 hash->dth_tab = kmem_zalloc(hash->dth_size *
6701 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6702
6703 return (hash);
6704 }
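
/*
 * One such hash exists per element of the probe tuple; illustratively,
 * the by-module hash is created as:
 *
 *	dtrace_bymod = dtrace_hash_create(
 *	    offsetof(dtrace_probe_t, dtpr_mod),
 *	    offsetof(dtrace_probe_t, dtpr_nextmod),
 *	    offsetof(dtrace_probe_t, dtpr_prevmod));
 */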
6705
6706 static void
6707 dtrace_hash_destroy(dtrace_hash_t *hash)
6708 {
6709 #ifdef DEBUG
6710 int i;
6711
6712 for (i = 0; i < hash->dth_size; i++)
6713 ASSERT(hash->dth_tab[i] == NULL);
6714 #endif
6715
6716 kmem_free(hash->dth_tab,
6717 hash->dth_size * sizeof (dtrace_hashbucket_t *));
6718 kmem_free(hash, sizeof (dtrace_hash_t));
6719 }
6720
6721 static void
6722 dtrace_hash_resize(dtrace_hash_t *hash)
6723 {
6724 int size = hash->dth_size, i, ndx;
6725 int new_size = hash->dth_size << 1;
6726 int new_mask = new_size - 1;
6727 dtrace_hashbucket_t **new_tab, *bucket, *next;
6728
6729 ASSERT((new_size & new_mask) == 0);
6730
6731 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6732
6733 for (i = 0; i < size; i++) {
6734 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6735 dtrace_probe_t *probe = bucket->dthb_chain;
6736
6737 ASSERT(probe != NULL);
6738 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6739
6740 next = bucket->dthb_next;
6741 bucket->dthb_next = new_tab[ndx];
6742 new_tab[ndx] = bucket;
6743 }
6744 }
6745
6746 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6747 hash->dth_tab = new_tab;
6748 hash->dth_size = new_size;
6749 hash->dth_mask = new_mask;
6750 }
6751
6752 static void
6753 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6754 {
6755 int hashval = DTRACE_HASHSTR(hash, new);
6756 int ndx = hashval & hash->dth_mask;
6757 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6758 dtrace_probe_t **nextp, **prevp;
6759
6760 for (; bucket != NULL; bucket = bucket->dthb_next) {
6761 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6762 goto add;
6763 }
6764
6765 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6766 dtrace_hash_resize(hash);
6767 dtrace_hash_add(hash, new);
6768 return;
6769 }
6770
6771 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6772 bucket->dthb_next = hash->dth_tab[ndx];
6773 hash->dth_tab[ndx] = bucket;
6774 hash->dth_nbuckets++;
6775
6776 add:
6777 nextp = DTRACE_HASHNEXT(hash, new);
6778 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6779 *nextp = bucket->dthb_chain;
6780
6781 if (bucket->dthb_chain != NULL) {
6782 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6783 ASSERT(*prevp == NULL);
6784 *prevp = new;
6785 }
6786
6787 bucket->dthb_chain = new;
6788 bucket->dthb_len++;
6789 }
6790
6791 static dtrace_probe_t *
6792 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6793 {
6794 int hashval = DTRACE_HASHSTR(hash, template);
6795 int ndx = hashval & hash->dth_mask;
6796 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6797
6798 for (; bucket != NULL; bucket = bucket->dthb_next) {
6799 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6800 return (bucket->dthb_chain);
6801 }
6802
6803 return (NULL);
6804 }
6805
6806 static int
6807 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6808 {
6809 int hashval = DTRACE_HASHSTR(hash, template);
6810 int ndx = hashval & hash->dth_mask;
6811 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6812
6813 for (; bucket != NULL; bucket = bucket->dthb_next) {
6814 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6815 return (bucket->dthb_len);
6816 }
6817
6818 return (0);
6819 }
6820
6821 static void
6822 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6823 {
6824 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6825 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6826
6827 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6828 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6829
6830 /*
6831 * Find the bucket that we're removing this probe from.
6832 */
6833 for (; bucket != NULL; bucket = bucket->dthb_next) {
6834 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6835 break;
6836 }
6837
6838 ASSERT(bucket != NULL);
6839
6840 if (*prevp == NULL) {
6841 if (*nextp == NULL) {
6842 /*
6843 * The removed probe was the only probe on this
6844 * bucket; we need to remove the bucket.
6845 */
6846 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6847
6848 ASSERT(bucket->dthb_chain == probe);
6849 ASSERT(b != NULL);
6850
6851 if (b == bucket) {
6852 hash->dth_tab[ndx] = bucket->dthb_next;
6853 } else {
6854 while (b->dthb_next != bucket)
6855 b = b->dthb_next;
6856 b->dthb_next = bucket->dthb_next;
6857 }
6858
6859 ASSERT(hash->dth_nbuckets > 0);
6860 hash->dth_nbuckets--;
6861 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6862 return;
6863 }
6864
6865 bucket->dthb_chain = *nextp;
6866 } else {
6867 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6868 }
6869
6870 if (*nextp != NULL)
6871 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6872 }
6873
6874 /*
6875 * DTrace Utility Functions
6876 *
6877 * These are random utility functions that are _not_ called from probe context.
6878 */
6879 static int
6880 dtrace_badattr(const dtrace_attribute_t *a)
6881 {
6882 return (a->dtat_name > DTRACE_STABILITY_MAX ||
6883 a->dtat_data > DTRACE_STABILITY_MAX ||
6884 a->dtat_class > DTRACE_CLASS_MAX);
6885 }
6886
6887 /*
6888 * Return a duplicate copy of a string. If the specified string is NULL,
6889 * this function returns a zero-length string.
6890 */
6891 static char *
6892 dtrace_strdup(const char *str)
6893 {
6894 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6895
6896 if (str != NULL)
6897 (void) strcpy(new, str);
6898
6899 return (new);
6900 }
6901
6902 #define DTRACE_ISALPHA(c) \
6903 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6904
6905 static int
6906 dtrace_badname(const char *s)
6907 {
6908 char c;
6909
6910 if (s == NULL || (c = *s++) == '\0')
6911 return (0);
6912
6913 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6914 return (1);
6915
6916 while ((c = *s++) != '\0') {
6917 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
6918 c != '-' && c != '_' && c != '.' && c != '`')
6919 return (1);
6920 }
6921
6922 return (0);
6923 }
6924
6925 static void
6926 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
6927 {
6928 uint32_t priv;
6929
6930 #if defined(sun)
6931 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
6932 /*
6933 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter,
6934 * but we zero them anyway to quiet GCC's uninitialized-variable warnings.
6935 */
6936 *uidp = 0;
6937 *zoneidp = 0;
6938
6939 priv = DTRACE_PRIV_ALL;
6940 } else {
6941 *uidp = crgetuid(cr);
6942 *zoneidp = crgetzoneid(cr);
6943
6944 priv = 0;
6945 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
6946 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
6947 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
6948 priv |= DTRACE_PRIV_USER;
6949 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
6950 priv |= DTRACE_PRIV_PROC;
6951 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
6952 priv |= DTRACE_PRIV_OWNER;
6953 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
6954 priv |= DTRACE_PRIV_ZONEOWNER;
6955 }
6956 #else
6957 priv = DTRACE_PRIV_ALL;
6958 *uidp = 0;
6959 *zoneidp = 0;
6960 #endif
6961
6962 *privp = priv;
6963 }
6964
6965 #ifdef DTRACE_ERRDEBUG
6966 static void
6967 dtrace_errdebug(const char *str)
6968 {
6969 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
6970 int occupied = 0;
6971
6972 mutex_enter(&dtrace_errlock);
6973 dtrace_errlast = str;
6974 dtrace_errthread = curthread;
6975
6976 while (occupied++ < DTRACE_ERRHASHSZ) {
6977 if (dtrace_errhash[hval].dter_msg == str) {
6978 dtrace_errhash[hval].dter_count++;
6979 goto out;
6980 }
6981
6982 if (dtrace_errhash[hval].dter_msg != NULL) {
6983 hval = (hval + 1) % DTRACE_ERRHASHSZ;
6984 continue;
6985 }
6986
6987 dtrace_errhash[hval].dter_msg = str;
6988 dtrace_errhash[hval].dter_count = 1;
6989 goto out;
6990 }
6991
6992 panic("dtrace: undersized error hash");
6993 out:
6994 mutex_exit(&dtrace_errlock);
6995 }
6996 #endif
6997
6998 /*
6999 * DTrace Matching Functions
7000 *
7001 * These functions are used to match groups of probes, given some elements of
7002 * a probe tuple, or some globbed expressions for elements of a probe tuple.
7003 */
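/*
 * For example, the description "syscall::open*:entry" matches its
 * provider and name elements as literal strings, matches any module
 * (an empty element), and globs the function element.
 */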
7004 static int
7005 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
7006 zoneid_t zoneid)
7007 {
7008 if (priv != DTRACE_PRIV_ALL) {
7009 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
7010 uint32_t match = priv & ppriv;
7011
7012 /*
7013 * No PRIV_DTRACE_* privileges...
7014 */
7015 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
7016 DTRACE_PRIV_KERNEL)) == 0)
7017 return (0);
7018
7019 /*
7020 * No matching bits, but there were bits to match...
7021 */
7022 if (match == 0 && ppriv != 0)
7023 return (0);
7024
7025 /*
7026 * Need to have permissions to the process, but don't...
7027 */
7028 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
7029 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
7030 return (0);
7031 }
7032
7033 /*
7034 * Need to be in the same zone unless we possess the
7035 * privilege to examine all zones.
7036 */
7037 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
7038 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
7039 return (0);
7040 }
7041 }
7042
7043 return (1);
7044 }
7045
7046 /*
7047 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
7048 * consists of input pattern strings and an ops-vector to evaluate them.
7049 * This function returns >0 for match, 0 for no match, and <0 for error.
7050 */
7051 static int
7052 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
7053 uint32_t priv, uid_t uid, zoneid_t zoneid)
7054 {
7055 dtrace_provider_t *pvp = prp->dtpr_provider;
7056 int rv;
7057
7058 if (pvp->dtpv_defunct)
7059 return (0);
7060
7061 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
7062 return (rv);
7063
7064 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
7065 return (rv);
7066
7067 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
7068 return (rv);
7069
7070 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
7071 return (rv);
7072
7073 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
7074 return (0);
7075
7076 return (rv);
7077 }
7078
7079 /*
7080 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
7081 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
7082 * libc's version, the kernel version only applies to 8-bit ASCII strings.
7083 * In addition, all of the recursion cases except for '*' matching have been
7084 * unwound. For '*', we still implement recursive evaluation, but a depth
7085 * counter is maintained and matching is aborted if we recurse too deep.
7086 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7087 */
7088 static int
7089 dtrace_match_glob(const char *s, const char *p, int depth)
7090 {
7091 const char *olds;
7092 char s1, c;
7093 int gs;
7094
7095 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7096 return (-1);
7097
7098 if (s == NULL)
7099 s = ""; /* treat NULL as empty string */
7100
7101 top:
7102 olds = s;
7103 s1 = *s++;
7104
7105 if (p == NULL)
7106 return (0);
7107
7108 if ((c = *p++) == '\0')
7109 return (s1 == '\0');
7110
7111 switch (c) {
7112 case '[': {
7113 int ok = 0, notflag = 0;
7114 char lc = '\0';
7115
7116 if (s1 == '\0')
7117 return (0);
7118
7119 if (*p == '!') {
7120 notflag = 1;
7121 p++;
7122 }
7123
7124 if ((c = *p++) == '\0')
7125 return (0);
7126
7127 do {
7128 if (c == '-' && lc != '\0' && *p != ']') {
7129 if ((c = *p++) == '\0')
7130 return (0);
7131 if (c == '\\' && (c = *p++) == '\0')
7132 return (0);
7133
7134 if (notflag) {
7135 if (s1 < lc || s1 > c)
7136 ok++;
7137 else
7138 return (0);
7139 } else if (lc <= s1 && s1 <= c)
7140 ok++;
7141
7142 } else if (c == '\\' && (c = *p++) == '\0')
7143 return (0);
7144
7145 lc = c; /* save left-hand 'c' for next iteration */
7146
7147 if (notflag) {
7148 if (s1 != c)
7149 ok++;
7150 else
7151 return (0);
7152 } else if (s1 == c)
7153 ok++;
7154
7155 if ((c = *p++) == '\0')
7156 return (0);
7157
7158 } while (c != ']');
7159
7160 if (ok)
7161 goto top;
7162
7163 return (0);
7164 }
7165
7166 case '\\':
7167 if ((c = *p++) == '\0')
7168 return (0);
7169 /*FALLTHRU*/
7170
7171 default:
7172 if (c != s1)
7173 return (0);
7174 /*FALLTHRU*/
7175
7176 case '?':
7177 if (s1 != '\0')
7178 goto top;
7179 return (0);
7180
7181 case '*':
7182 while (*p == '*')
7183 p++; /* consecutive *'s are identical to a single one */
7184
7185 if (*p == '\0')
7186 return (1);
7187
7188 for (s = olds; *s != '\0'; s++) {
7189 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7190 return (gs);
7191 }
7192
7193 return (0);
7194 }
7195 }
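
/*
 * Illustratively: dtrace_match_glob("read", "re*", 0) and
 * dtrace_match_glob("read", "r??d", 0) both return 1, while
 * dtrace_match_glob("read", "[a-q]*", 0) returns 0.
 */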
7196
7197 /*ARGSUSED*/
7198 static int
7199 dtrace_match_string(const char *s, const char *p, int depth)
7200 {
7201 return (s != NULL && strcmp(s, p) == 0);
7202 }
7203
7204 /*ARGSUSED*/
7205 static int
7206 dtrace_match_nul(const char *s, const char *p, int depth)
7207 {
7208 return (1); /* always match the empty pattern */
7209 }
7210
7211 /*ARGSUSED*/
7212 static int
7213 dtrace_match_nonzero(const char *s, const char *p, int depth)
7214 {
7215 return (s != NULL && s[0] != '\0');
7216 }
7217
7218 static int
7219 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
7220 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
7221 {
7222 dtrace_probe_t template, *probe;
7223 dtrace_hash_t *hash = NULL;
7224 int len, rc, best = INT_MAX, nmatched = 0;
7225 dtrace_id_t i;
7226
7227 ASSERT(MUTEX_HELD(&dtrace_lock));
7228
7229 /*
7230 * If the probe ID is specified in the key, just lookup by ID and
7231 * invoke the match callback once if a matching probe is found.
7232 */
7233 if (pkp->dtpk_id != DTRACE_IDNONE) {
7234 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
7235 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
7236 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
7237 return (DTRACE_MATCH_FAIL);
7238 nmatched++;
7239 }
7240 return (nmatched);
7241 }
7242
7243 template.dtpr_mod = (char *)pkp->dtpk_mod;
7244 template.dtpr_func = (char *)pkp->dtpk_func;
7245 template.dtpr_name = (char *)pkp->dtpk_name;
7246
7247 /*
7248 * We want to find the most distinct of the module name, function
7249 * name, and name. So for each one that is not a glob pattern or
7250 * empty string, we perform a lookup in the corresponding hash and
7251 * use the hash table with the fewest collisions to do our search.
7252 */
7253 if (pkp->dtpk_mmatch == &dtrace_match_string &&
7254 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
7255 best = len;
7256 hash = dtrace_bymod;
7257 }
7258
7259 if (pkp->dtpk_fmatch == &dtrace_match_string &&
7260 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
7261 best = len;
7262 hash = dtrace_byfunc;
7263 }
7264
7265 if (pkp->dtpk_nmatch == &dtrace_match_string &&
7266 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
7267 best = len;
7268 hash = dtrace_byname;
7269 }
7270
7271 /*
7272 * If we did not select a hash table, iterate over every probe and
7273 * invoke our callback for each one that matches our input probe key.
7274 */
7275 if (hash == NULL) {
7276 for (i = 0; i < dtrace_nprobes; i++) {
7277 if ((probe = dtrace_probes[i]) == NULL ||
7278 dtrace_match_probe(probe, pkp, priv, uid,
7279 zoneid) <= 0)
7280 continue;
7281
7282 nmatched++;
7283
7284 if ((rc = (*matched)(probe, arg)) !=
7285 DTRACE_MATCH_NEXT) {
7286 if (rc == DTRACE_MATCH_FAIL)
7287 return (DTRACE_MATCH_FAIL);
7288 break;
7289 }
7290 }
7291
7292 return (nmatched);
7293 }
7294
7295 /*
7296 * If we selected a hash table, iterate over each probe of the same key
7297 * name and invoke the callback for every probe that matches the other
7298 * attributes of our input probe key.
7299 */
7300 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7301 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7302
7303 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7304 continue;
7305
7306 nmatched++;
7307
7308 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
7309 if (rc == DTRACE_MATCH_FAIL)
7310 return (DTRACE_MATCH_FAIL);
7311 break;
7312 }
7313 }
7314
7315 return (nmatched);
7316 }
7317
7318 /*
7319 * Return the function pointer dtrace_probecmp() should use to compare the
7320 * specified pattern with a string. For NULL or empty patterns, we select
7321 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7322 * For non-empty non-glob strings, we use dtrace_match_string().
7323 */
7324 static dtrace_probekey_f *
7325 dtrace_probekey_func(const char *p)
7326 {
7327 char c;
7328
7329 if (p == NULL || *p == '\0')
7330 return (&dtrace_match_nul);
7331
7332 while ((c = *p++) != '\0') {
7333 if (c == '[' || c == '?' || c == '*' || c == '\\')
7334 return (&dtrace_match_glob);
7335 }
7336
7337 return (&dtrace_match_string);
7338 }
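
/*
 * For example, a NULL or empty pattern selects dtrace_match_nul(), a
 * pattern such as "sys_*" selects dtrace_match_glob(), and a plain
 * string such as "fbt" selects dtrace_match_string().
 */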
7339
7340 /*
7341 * Build a probe comparison key for use with dtrace_match_probe() from the
7342 * given probe description. By convention, a null key only matches anchored
7343 * probes: if each field is the empty string, reset dtpk_fmatch to
7344 * dtrace_match_nonzero().
7345 */
7346 static void
7347 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7348 {
7349 pkp->dtpk_prov = pdp->dtpd_provider;
7350 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7351
7352 pkp->dtpk_mod = pdp->dtpd_mod;
7353 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7354
7355 pkp->dtpk_func = pdp->dtpd_func;
7356 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7357
7358 pkp->dtpk_name = pdp->dtpd_name;
7359 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7360
7361 pkp->dtpk_id = pdp->dtpd_id;
7362
7363 if (pkp->dtpk_id == DTRACE_IDNONE &&
7364 pkp->dtpk_pmatch == &dtrace_match_nul &&
7365 pkp->dtpk_mmatch == &dtrace_match_nul &&
7366 pkp->dtpk_fmatch == &dtrace_match_nul &&
7367 pkp->dtpk_nmatch == &dtrace_match_nul)
7368 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7369 }
7370
7371 /*
7372 * DTrace Provider-to-Framework API Functions
7373 *
7374 * These functions implement much of the Provider-to-Framework API, as
7375 * described in <sys/dtrace.h>. The parts of the API not in this section are
7376 * the functions in the API for probe management (found below), and
7377 * dtrace_probe() itself (found above).
7378 */
7379
7380 /*
7381 * Register the calling provider with the DTrace framework. This should
7382 * generally be called by DTrace providers in their attach(9E) entry point.
7383 */
7384 int
7385 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7386 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7387 {
7388 dtrace_provider_t *provider;
7389
7390 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7391 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7392 "arguments", name ? name : "<NULL>");
7393 return (EINVAL);
7394 }
7395
7396 if (name[0] == '\0' || dtrace_badname(name)) {
7397 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7398 "provider name", name);
7399 return (EINVAL);
7400 }
7401
7402 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
7403 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
7404 pops->dtps_destroy == NULL ||
7405 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
7406 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7407 "provider ops", name);
7408 return (EINVAL);
7409 }
7410
7411 if (dtrace_badattr(&pap->dtpa_provider) ||
7412 dtrace_badattr(&pap->dtpa_mod) ||
7413 dtrace_badattr(&pap->dtpa_func) ||
7414 dtrace_badattr(&pap->dtpa_name) ||
7415 dtrace_badattr(&pap->dtpa_args)) {
7416 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7417 "provider attributes", name);
7418 return (EINVAL);
7419 }
7420
7421 if (priv & ~DTRACE_PRIV_ALL) {
7422 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7423 "privilege attributes", name);
7424 return (EINVAL);
7425 }
7426
7427 if ((priv & DTRACE_PRIV_KERNEL) &&
7428 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
7429 pops->dtps_usermode == NULL) {
7430 cmn_err(CE_WARN, "failed to register provider '%s': need "
7431 "dtps_usermode() op for given privilege attributes", name);
7432 return (EINVAL);
7433 }
7434
7435 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
7436 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7437 (void) strcpy(provider->dtpv_name, name);
7438
7439 provider->dtpv_attr = *pap;
7440 provider->dtpv_priv.dtpp_flags = priv;
7441 if (cr != NULL) {
7442 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
7443 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
7444 }
7445 provider->dtpv_pops = *pops;
7446
7447 if (pops->dtps_provide == NULL) {
7448 ASSERT(pops->dtps_provide_module != NULL);
7449 provider->dtpv_pops.dtps_provide =
7450 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
7451 }
7452
7453 if (pops->dtps_provide_module == NULL) {
7454 ASSERT(pops->dtps_provide != NULL);
7455 #if defined(sun)
7456 provider->dtpv_pops.dtps_provide_module =
7457 (void (*)(void *, modctl_t *))dtrace_nullop;
7458 #else
7459 provider->dtpv_pops.dtps_provide_module =
7460 (void (*)(void *, dtrace_modctl_t *))dtrace_nullop;
7461 #endif
7462 }
7463
7464 if (pops->dtps_suspend == NULL) {
7465 ASSERT(pops->dtps_resume == NULL);
7466 provider->dtpv_pops.dtps_suspend =
7467 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7468 provider->dtpv_pops.dtps_resume =
7469 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7470 }
7471
7472 provider->dtpv_arg = arg;
7473 *idp = (dtrace_provider_id_t)provider;
7474
7475 if (pops == &dtrace_provider_ops) {
7476 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7477 ASSERT(MUTEX_HELD(&dtrace_lock));
7478 ASSERT(dtrace_anon.dta_enabling == NULL);
7479
7480 /*
7481 * We make sure that the DTrace provider is at the head of
7482 * the provider chain.
7483 */
7484 provider->dtpv_next = dtrace_provider;
7485 dtrace_provider = provider;
7486 return (0);
7487 }
7488
7489 mutex_enter(&dtrace_provider_lock);
7490 mutex_enter(&dtrace_lock);
7491
7492 /*
7493 * If there is at least one provider registered, we'll add this
7494 * provider after the first provider.
7495 */
7496 if (dtrace_provider != NULL) {
7497 provider->dtpv_next = dtrace_provider->dtpv_next;
7498 dtrace_provider->dtpv_next = provider;
7499 } else {
7500 dtrace_provider = provider;
7501 }
7502
7503 if (dtrace_retained != NULL) {
7504 dtrace_enabling_provide(provider);
7505
7506 /*
7507 * Now we need to call dtrace_enabling_matchall() -- which
7508 * will acquire cpu_lock and dtrace_lock. We therefore need
7509 * to drop all of our locks before calling into it...
7510 */
7511 mutex_exit(&dtrace_lock);
7512 mutex_exit(&dtrace_provider_lock);
7513 dtrace_enabling_matchall();
7514
7515 return (0);
7516 }
7517
7518 mutex_exit(&dtrace_lock);
7519 mutex_exit(&dtrace_provider_lock);
7520
7521 return (0);
7522 }
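
/*
 * A minimal registration sketch (hypothetical provider name; the
 * attribute and ops vectors are assumed to be filled in as described
 * in <sys/dtrace.h>):
 *
 *	static dtrace_provider_id_t example_id;
 *
 *	error = dtrace_register("example", &example_attr,
 *	    DTRACE_PRIV_USER, NULL, &example_pops, NULL, &example_id);
 */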
7523
7524 /*
7525 * Unregister the specified provider from the DTrace framework. This should
7526 * generally be called by DTrace providers in their detach(9E) entry point.
7527 */
7528 int
7529 dtrace_unregister(dtrace_provider_id_t id)
7530 {
7531 dtrace_provider_t *old = (dtrace_provider_t *)id;
7532 dtrace_provider_t *prev = NULL;
7533 int i, self = 0;
7534 dtrace_probe_t *probe, *first = NULL;
7535
7536 if (old->dtpv_pops.dtps_enable ==
7537 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
7538 /*
7539 * If DTrace itself is the provider, we're called with locks
7540 * already held.
7541 */
7542 ASSERT(old == dtrace_provider);
7543 #if defined(sun)
7544 ASSERT(dtrace_devi != NULL);
7545 #endif
7546 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7547 ASSERT(MUTEX_HELD(&dtrace_lock));
7548 self = 1;
7549
7550 if (dtrace_provider->dtpv_next != NULL) {
7551 /*
7552 * There's another provider here; return failure.
7553 */
7554 return (EBUSY);
7555 }
7556 } else {
7557 mutex_enter(&dtrace_provider_lock);
7558 mutex_enter(&mod_lock);
7559 mutex_enter(&dtrace_lock);
7560 }
7561
7562 /*
7563 * If anyone has /dev/dtrace open, or if there are anonymous enabled
7564 * probes, we refuse to let providers slither away, unless this
7565 * provider has already been explicitly invalidated.
7566 */
7567 if (!old->dtpv_defunct &&
7568 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
7569 dtrace_anon.dta_state->dts_necbs > 0))) {
7570 if (!self) {
7571 mutex_exit(&dtrace_lock);
7572 mutex_exit(&mod_lock);
7573 mutex_exit(&dtrace_provider_lock);
7574 }
7575 return (EBUSY);
7576 }
7577
7578 /*
7579 * Attempt to destroy the probes associated with this provider.
7580 */
7581 for (i = 0; i < dtrace_nprobes; i++) {
7582 if ((probe = dtrace_probes[i]) == NULL)
7583 continue;
7584
7585 if (probe->dtpr_provider != old)
7586 continue;
7587
7588 if (probe->dtpr_ecb == NULL)
7589 continue;
7590
7591 /*
7592 * We have at least one ECB; we can't remove this provider.
7593 */
7594 if (!self) {
7595 mutex_exit(&dtrace_lock);
7596 mutex_exit(&mod_lock);
7597 mutex_exit(&dtrace_provider_lock);
7598 }
7599 return (EBUSY);
7600 }
7601
7602 /*
7603 * All of the probes for this provider are disabled; we can safely
7604 * remove all of them from their hash chains and from the probe array.
7605 */
7606 for (i = 0; i < dtrace_nprobes; i++) {
7607 if ((probe = dtrace_probes[i]) == NULL)
7608 continue;
7609
7610 if (probe->dtpr_provider != old)
7611 continue;
7612
7613 dtrace_probes[i] = NULL;
7614
7615 dtrace_hash_remove(dtrace_bymod, probe);
7616 dtrace_hash_remove(dtrace_byfunc, probe);
7617 dtrace_hash_remove(dtrace_byname, probe);
7618
7619 if (first == NULL) {
7620 first = probe;
7621 probe->dtpr_nextmod = NULL;
7622 } else {
7623 probe->dtpr_nextmod = first;
7624 first = probe;
7625 }
7626 }
7627
7628 /*
7629 * The provider's probes have been removed from the hash chains and
7630 * from the probe array. Now issue a dtrace_sync() to be sure that
7631 * everyone has cleared out from any probe array processing.
7632 */
7633 dtrace_sync();
7634
7635 for (probe = first; probe != NULL; probe = first) {
7636 first = probe->dtpr_nextmod;
7637
7638 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
7639 probe->dtpr_arg);
7640 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7641 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7642 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7643 #if defined(sun)
7644 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
7645 #else
7646 vmem_free(dtrace_arena, (uintptr_t)(probe->dtpr_id), 1);
7647 #endif
7648 kmem_free(probe, sizeof (dtrace_probe_t));
7649 }
7650
7651 if ((prev = dtrace_provider) == old) {
7652 #if defined(sun)
7653 ASSERT(self || dtrace_devi == NULL);
7654 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
7655 #endif
7656 dtrace_provider = old->dtpv_next;
7657 } else {
7658 while (prev != NULL && prev->dtpv_next != old)
7659 prev = prev->dtpv_next;
7660
7661 if (prev == NULL) {
7662 panic("attempt to unregister non-existent "
7663 "dtrace provider %p\n", (void *)id);
7664 }
7665
7666 prev->dtpv_next = old->dtpv_next;
7667 }
7668
7669 if (!self) {
7670 mutex_exit(&dtrace_lock);
7671 mutex_exit(&mod_lock);
7672 mutex_exit(&dtrace_provider_lock);
7673 }
7674
7675 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
7676 kmem_free(old, sizeof (dtrace_provider_t));
7677
7678 return (0);
7679 }
7680
7681 /*
7682 * Invalidate the specified provider. All subsequent probe lookups for the
7683 * specified provider will fail, but its probes will not be removed.
7684 */
7685 void
7686 dtrace_invalidate(dtrace_provider_id_t id)
7687 {
7688 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
7689
7690 ASSERT(pvp->dtpv_pops.dtps_enable !=
7691 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
7692
7693 mutex_enter(&dtrace_provider_lock);
7694 mutex_enter(&dtrace_lock);
7695
7696 pvp->dtpv_defunct = 1;
7697
7698 mutex_exit(&dtrace_lock);
7699 mutex_exit(&dtrace_provider_lock);
7700 }
7701
7702 /*
7703 * Indicate whether or not DTrace has attached.
7704 */
7705 int
7706 dtrace_attached(void)
7707 {
7708 /*
7709 * dtrace_provider will be non-NULL iff the DTrace driver has
7710 * attached. (It's non-NULL because DTrace is always itself a
7711 * provider.)
7712 */
7713 return (dtrace_provider != NULL);
7714 }
7715
7716 /*
7717 * Remove all the unenabled probes for the given provider. This function is
7718 * not unlike dtrace_unregister(), except that it doesn't remove the provider
7719 * -- just as many of its associated probes as it can.
7720 */
7721 int
7722 dtrace_condense(dtrace_provider_id_t id)
7723 {
7724 dtrace_provider_t *prov = (dtrace_provider_t *)id;
7725 int i;
7726 dtrace_probe_t *probe;
7727
7728 /*
7729 * Make sure this isn't the dtrace provider itself.
7730 */
7731 ASSERT(prov->dtpv_pops.dtps_enable !=
7732 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
7733
7734 mutex_enter(&dtrace_provider_lock);
7735 mutex_enter(&dtrace_lock);
7736
7737 /*
7738 * Attempt to destroy the probes associated with this provider.
7739 */
7740 for (i = 0; i < dtrace_nprobes; i++) {
7741 if ((probe = dtrace_probes[i]) == NULL)
7742 continue;
7743
7744 if (probe->dtpr_provider != prov)
7745 continue;
7746
7747 if (probe->dtpr_ecb != NULL)
7748 continue;
7749
7750 dtrace_probes[i] = NULL;
7751
7752 dtrace_hash_remove(dtrace_bymod, probe);
7753 dtrace_hash_remove(dtrace_byfunc, probe);
7754 dtrace_hash_remove(dtrace_byname, probe);
7755
7756 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7757 probe->dtpr_arg);
7758 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7759 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7760 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7761 kmem_free(probe, sizeof (dtrace_probe_t));
7762 #if defined(sun)
7763 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7764 #else
7765 vmem_free(dtrace_arena, ((uintptr_t)i + 1), 1);
7766 #endif
7767 }
7768
7769 mutex_exit(&dtrace_lock);
7770 mutex_exit(&dtrace_provider_lock);
7771
7772 return (0);
7773 }
7774
7775 /*
7776 * DTrace Probe Management Functions
7777 *
7778 * The functions in this section perform the DTrace probe management,
7779 * including functions to create probes, look-up probes, and call into the
7780 * providers to request that probes be provided. Some of these functions are
7781 * in the Provider-to-Framework API; these functions can be identified by the
7782 * fact that they are not declared "static".
7783 */
7784
7785 /*
7786 * Create a probe with the specified module name, function name, and name.
7787 */
7788 dtrace_id_t
7789 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7790 const char *func, const char *name, int aframes, void *arg)
7791 {
7792 dtrace_probe_t *probe, **probes;
7793 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7794 dtrace_id_t id;
7795 vmem_addr_t offset;
7796
7797 if (provider == dtrace_provider) {
7798 ASSERT(MUTEX_HELD(&dtrace_lock));
7799 } else {
7800 mutex_enter(&dtrace_lock);
7801 }
7802
7803 if (vmem_alloc(dtrace_arena, 1, VM_BESTFIT | VM_SLEEP, &offset) != 0)
7804 ASSERT(0);
7805 id = (dtrace_id_t)(uintptr_t)offset;
7806 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7807
7808 probe->dtpr_id = id;
7809 probe->dtpr_gen = dtrace_probegen++;
7810 probe->dtpr_mod = dtrace_strdup(mod);
7811 probe->dtpr_func = dtrace_strdup(func);
7812 probe->dtpr_name = dtrace_strdup(name);
7813 probe->dtpr_arg = arg;
7814 probe->dtpr_aframes = aframes;
7815 probe->dtpr_provider = provider;
7816
7817 dtrace_hash_add(dtrace_bymod, probe);
7818 dtrace_hash_add(dtrace_byfunc, probe);
7819 dtrace_hash_add(dtrace_byname, probe);
7820
7821 if (id - 1 >= dtrace_nprobes) {
7822 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7823 size_t nsize = osize << 1;
7824
7825 if (nsize == 0) {
7826 ASSERT(osize == 0);
7827 ASSERT(dtrace_probes == NULL);
7828 nsize = sizeof (dtrace_probe_t *);
7829 }
7830
7831 probes = kmem_zalloc(nsize, KM_SLEEP);
7832 dtrace_probes_size = nsize;
7833
7834 if (dtrace_probes == NULL) {
7835 ASSERT(osize == 0);
7836 dtrace_probes = probes;
7837 dtrace_nprobes = 1;
7838 } else {
7839 dtrace_probe_t **oprobes = dtrace_probes;
7840
7841 bcopy(oprobes, probes, osize);
7842 dtrace_membar_producer();
7843 dtrace_probes = probes;
7844
7845 dtrace_sync();
7846
7847 /*
7848 * All CPUs are now seeing the new probes array; we can
7849 * safely free the old array.
7850 */
7851 kmem_free(oprobes, osize);
7852 dtrace_nprobes <<= 1;
7853 }
7854
7855 ASSERT(id - 1 < dtrace_nprobes);
7856 }
7857
7858 ASSERT(dtrace_probes[id - 1] == NULL);
7859 dtrace_probes[id - 1] = probe;
7860
7861 if (provider != dtrace_provider)
7862 mutex_exit(&dtrace_lock);
7863
7864 return (id);
7865 }
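
/*
 * An illustrative call (hypothetical provider state; no probe argument):
 *
 *	id = dtrace_probe_create(example_id, "kernel", "sys_open",
 *	    "entry", 0, NULL);
 */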
7866
7867 static dtrace_probe_t *
7868 dtrace_probe_lookup_id(dtrace_id_t id)
7869 {
7870 ASSERT(MUTEX_HELD(&dtrace_lock));
7871
7872 if (id == 0 || id > dtrace_nprobes)
7873 return (NULL);
7874
7875 return (dtrace_probes[id - 1]);
7876 }
7877
7878 static int
7879 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7880 {
7881 *((dtrace_id_t *)arg) = probe->dtpr_id;
7882
7883 return (DTRACE_MATCH_DONE);
7884 }
7885
7886 /*
7887 * Look up a probe based on provider and one or more of module name, function
7888 * name and probe name.
7889 */
7890 dtrace_id_t
7891 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
7892 const char *func, const char *name)
7893 {
7894 dtrace_probekey_t pkey;
7895 dtrace_id_t id;
7896 int match;
7897
7898 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7899 pkey.dtpk_pmatch = &dtrace_match_string;
7900 pkey.dtpk_mod = __UNCONST(mod);
7901 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7902 pkey.dtpk_func = __UNCONST(func);
7903 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7904 pkey.dtpk_name = __UNCONST(name);
7905 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7906 pkey.dtpk_id = DTRACE_IDNONE;
7907
7908 mutex_enter(&dtrace_lock);
7909 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7910 dtrace_probe_lookup_match, &id);
7911 mutex_exit(&dtrace_lock);
7912
7913 ASSERT(match == 1 || match == 0);
7914 return (match ? id : 0);
7915 }
7916
7917 /*
7918 * Returns the probe argument associated with the specified probe.
7919 */
7920 void *
7921 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7922 {
7923 dtrace_probe_t *probe;
7924 void *rval = NULL;
7925
7926 mutex_enter(&dtrace_lock);
7927
7928 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7929 probe->dtpr_provider == (dtrace_provider_t *)id)
7930 rval = probe->dtpr_arg;
7931
7932 mutex_exit(&dtrace_lock);
7933
7934 return (rval);
7935 }
7936
7937 /*
7938 * Copy a probe into a probe description.
7939 */
7940 static void
7941 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
7942 {
7943 bzero(pdp, sizeof (dtrace_probedesc_t));
7944 pdp->dtpd_id = prp->dtpr_id;
7945
7946 (void) strncpy(pdp->dtpd_provider,
7947 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
7948
7949 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
7950 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
7951 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
7952 }
7953
7954 #ifdef notyet /* XXX TBD */
7955 #if !defined(sun)
7956 static int
7957 dtrace_probe_provide_cb(linker_file_t lf, void *arg)
7958 {
7959 dtrace_provider_t *prv = (dtrace_provider_t *) arg;
7960
7961 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf);
7962
7963 return(0);
7964 }
7965 #endif
7966 #endif /* notyet */
7967
7968
7969 /*
7970 * Called to indicate that a probe -- or probes -- should be provided by a
7971 * specified provider. If the specified description is NULL, the provider will
7972 * be told to provide all of its probes. (This is done whenever a new
7973 * consumer comes along, or whenever a retained enabling is to be matched.) If
7974 * the specified description is non-NULL, the provider is given the
7975 * opportunity to dynamically provide the specified probe, allowing providers
7976 * to support the creation of probes on-the-fly. (So-called _autocreated_
7977 * probes.) If the provider is NULL, the operations will be applied to all
7978 * providers; if the provider is non-NULL the operations will only be applied
7979 * to the specified provider. The dtrace_provider_lock must be held, and the
7980 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
7981 * will need to grab the dtrace_lock when it reenters the framework through
7982 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
7983 */
7984 static void
7985 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
7986 {
7987 #if defined(sun)
7988 modctl_t *ctl;
7989 #else
7990 module_t *mod;
7991 #endif
7992 int all = 0;
7993
7994 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7995
7996 if (prv == NULL) {
7997 all = 1;
7998 prv = dtrace_provider;
7999 }
8000
8001 do {
8002 /*
8003 * First, call the blanket provide operation.
8004 */
8005 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8006
8007 /*
8008 * Now call the per-module provide operation. We will grab
8009 * mod_lock to prevent the list from being modified. Note
8010 * that this also prevents the mod_busy bits from changing.
8011 * (mod_busy can only be changed with mod_lock held.)
8012 */
8013 mutex_enter(&mod_lock);
8014
8015 #if defined(sun)
8016 ctl = &modules;
8017 do {
8018 if (ctl->mod_busy || ctl->mod_mp == NULL)
8019 continue;
8020
8021 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
8022
8023 } while ((ctl = ctl->mod_next) != &modules);
8024 #else
8025
8026 /* Fake netbsd module first */
8027 if (mod_nbsd == NULL) {
8028 mod_nbsd = kmem_zalloc(sizeof(*mod_nbsd), KM_SLEEP);
8029 mod_nbsd->mod_info = kmem_zalloc(sizeof(modinfo_t), KM_SLEEP);
8030 mod_nbsd->mod_refcnt = 1;
8031 *((char **)(intptr_t)&mod_nbsd->mod_info->mi_name) = __UNCONST("netbsd");
8032 }
8033
8034 kernconfig_lock();
8035 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, mod_nbsd);
8036 TAILQ_FOREACH(mod, &module_list, mod_chain) {
8037 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, mod);
8038 }
8039 kernconfig_unlock();
8040 #endif
8041
8042 mutex_exit(&mod_lock);
8043 } while (all && (prv = prv->dtpv_next) != NULL);
8044 }
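
/*
 * For illustration, a provider's dtps_provide() entry point typically
 * distinguishes the two cases described above: if handed a non-NULL
 * description it may autocreate a matching probe (or simply return if
 * it does not support autocreation); if handed NULL it creates all of
 * the probes it knows about, using dtrace_probe_lookup() to guard
 * against redundant creation.  A sketch, with hypothetical names
 * (xx_provide, xx_id) -- not actual framework code:
 *
 *	static void
 *	xx_provide(void *arg, dtrace_probedesc_t *desc)
 *	{
 *		if (desc != NULL)
 *			return;
 *
 *		if (dtrace_probe_lookup(xx_id, NULL, NULL, "start") == 0)
 *			(void) dtrace_probe_create(xx_id, NULL, NULL,
 *			    "start", 0, NULL);
 *	}
 */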
8045
8046 #if defined(sun)
8047 /*
8048 * Iterate over each probe, and call the Framework-to-Provider API function
8049 * denoted by offs.
8050 */
8051 static void
8052 dtrace_probe_foreach(uintptr_t offs)
8053 {
8054 dtrace_provider_t *prov;
8055 void (*func)(void *, dtrace_id_t, void *);
8056 dtrace_probe_t *probe;
8057 dtrace_icookie_t cookie;
8058 int i;
8059
8060 /*
8061 * We disable interrupts to walk through the probe array. This is
8062 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
8063 * won't see stale data.
8064 */
8065 cookie = dtrace_interrupt_disable();
8066
8067 for (i = 0; i < dtrace_nprobes; i++) {
8068 if ((probe = dtrace_probes[i]) == NULL)
8069 continue;
8070
8071 if (probe->dtpr_ecb == NULL) {
8072 /*
8073 * This probe isn't enabled -- don't call the function.
8074 */
8075 continue;
8076 }
8077
8078 prov = probe->dtpr_provider;
8079 func = *((void(**)(void *, dtrace_id_t, void *))
8080 ((uintptr_t)&prov->dtpv_pops + offs));
8081
8082 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
8083 }
8084
8085 dtrace_interrupt_enable(cookie);
8086 }
8087 #endif
8088
8089 static int
8090 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
8091 {
8092 dtrace_probekey_t pkey;
8093 uint32_t priv;
8094 uid_t uid;
8095 zoneid_t zoneid;
8096
8097 ASSERT(MUTEX_HELD(&dtrace_lock));
8098 dtrace_ecb_create_cache = NULL;
8099
8100 if (desc == NULL) {
8101 /*
8102 * If we're passed a NULL description, we're being asked to
8103 * create an ECB with a NULL probe.
8104 */
8105 (void) dtrace_ecb_create_enable(NULL, enab);
8106 return (0);
8107 }
8108
8109 dtrace_probekey(desc, &pkey);
8110 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
8111 &priv, &uid, &zoneid);
8112
8113 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8114 enab));
8115 }
8116
8117 /*
8118 * DTrace Helper Provider Functions
8119 */
8120 static void
8121 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8122 {
8123 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8124 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8125 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8126 }
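
/*
 * A dof_attr_t packs the three stability attributes into a single
 * 32-bit word; the unpacking above is the inverse of the DOF_ATTR()
 * encoding:
 *
 *	attr = (name << 24) | (data << 16) | class;
 */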
8127
8128 static void
8129 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8130 const dof_provider_t *dofprov, char *strtab)
8131 {
8132 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8133 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8134 dofprov->dofpv_provattr);
8135 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8136 dofprov->dofpv_modattr);
8137 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8138 dofprov->dofpv_funcattr);
8139 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8140 dofprov->dofpv_nameattr);
8141 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8142 dofprov->dofpv_argsattr);
8143 }
8144
8145 static void
8146 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8147 {
8148 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8149 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8150 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8151 dof_provider_t *provider;
8152 dof_probe_t *probe;
8153 uint32_t *off, *enoff;
8154 uint8_t *arg;
8155 char *strtab;
8156 uint_t i, nprobes;
8157 dtrace_helper_provdesc_t dhpv;
8158 dtrace_helper_probedesc_t dhpb;
8159 dtrace_meta_t *meta = dtrace_meta_pid;
8160 dtrace_mops_t *mops = &meta->dtm_mops;
8161 void *parg;
8162
8163 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8164 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8165 provider->dofpv_strtab * dof->dofh_secsize);
8166 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8167 provider->dofpv_probes * dof->dofh_secsize);
8168 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8169 provider->dofpv_prargs * dof->dofh_secsize);
8170 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8171 provider->dofpv_proffs * dof->dofh_secsize);
8172
8173 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8174 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8175 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8176 enoff = NULL;
8177
8178 /*
8179 * See dtrace_helper_provider_validate().
8180 */
8181 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8182 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8183 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8184 provider->dofpv_prenoffs * dof->dofh_secsize);
8185 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8186 }
8187
8188 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8189
8190 /*
8191 * Create the provider.
8192 */
8193 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8194
8195 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8196 return;
8197
8198 meta->dtm_count++;
8199
8200 /*
8201 * Create the probes.
8202 */
8203 for (i = 0; i < nprobes; i++) {
8204 probe = (dof_probe_t *)(uintptr_t)(daddr +
8205 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8206
8207 dhpb.dthpb_mod = dhp->dofhp_mod;
8208 dhpb.dthpb_func = strtab + probe->dofpr_func;
8209 dhpb.dthpb_name = strtab + probe->dofpr_name;
8210 dhpb.dthpb_base = probe->dofpr_addr;
8211 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8212 dhpb.dthpb_noffs = probe->dofpr_noffs;
8213 if (enoff != NULL) {
8214 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8215 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8216 } else {
8217 dhpb.dthpb_enoffs = NULL;
8218 dhpb.dthpb_nenoffs = 0;
8219 }
8220 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8221 dhpb.dthpb_nargc = probe->dofpr_nargc;
8222 dhpb.dthpb_xargc = probe->dofpr_xargc;
8223 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8224 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8225
8226 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8227 }
8228 }
8229
8230 static void
8231 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8232 {
8233 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8234 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8235 int i;
8236
8237 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8238
8239 for (i = 0; i < dof->dofh_secnum; i++) {
8240 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8241 dof->dofh_secoff + i * dof->dofh_secsize);
8242
8243 if (sec->dofs_type != DOF_SECT_PROVIDER)
8244 continue;
8245
8246 dtrace_helper_provide_one(dhp, sec, pid);
8247 }
8248
8249 /*
8250 * We may have just created probes, so we must now rematch against
8251 * any retained enablings. Note that this call will acquire both
8252 * cpu_lock and dtrace_lock; the fact that we are holding
8253 * dtrace_meta_lock now is what defines the ordering with respect to
8254 * these three locks.
8255 */
8256 dtrace_enabling_matchall();
8257 }
8258
8259 #if defined(sun)
8260 static void
8261 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8262 {
8263 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8264 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8265 dof_sec_t *str_sec;
8266 dof_provider_t *provider;
8267 char *strtab;
8268 dtrace_helper_provdesc_t dhpv;
8269 dtrace_meta_t *meta = dtrace_meta_pid;
8270 dtrace_mops_t *mops = &meta->dtm_mops;
8271
8272 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8273 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8274 provider->dofpv_strtab * dof->dofh_secsize);
8275
8276 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8277
8278 /*
8279 * Create the provider.
8280 */
8281 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8282
8283 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8284
8285 meta->dtm_count--;
8286 }
8287
8288 static void
8289 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8290 {
8291 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8292 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8293 int i;
8294
8295 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8296
8297 for (i = 0; i < dof->dofh_secnum; i++) {
8298 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8299 dof->dofh_secoff + i * dof->dofh_secsize);
8300
8301 if (sec->dofs_type != DOF_SECT_PROVIDER)
8302 continue;
8303
8304 dtrace_helper_provider_remove_one(dhp, sec, pid);
8305 }
8306 }
8307 #endif
8308
8309 /*
8310 * DTrace Meta Provider-to-Framework API Functions
8311 *
8312 * These functions implement the Meta Provider-to-Framework API, as described
8313 * in <sys/dtrace.h>.
8314 */
8315 int
8316 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8317 dtrace_meta_provider_id_t *idp)
8318 {
8319 dtrace_meta_t *meta;
8320 dtrace_helpers_t *help, *next;
8321 int i;
8322
8323 *idp = DTRACE_METAPROVNONE;
8324
8325 /*
8326 * We strictly don't need the name, but we hold onto it for
8327 * debuggability. All hail error queues!
8328 */
8329 if (name == NULL) {
8330 cmn_err(CE_WARN, "failed to register meta-provider: "
8331 "invalid name");
8332 return (EINVAL);
8333 }
8334
8335 if (mops == NULL ||
8336 mops->dtms_create_probe == NULL ||
8337 mops->dtms_provide_pid == NULL ||
8338 mops->dtms_remove_pid == NULL) {
8339 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8340 "invalid ops", name);
8341 return (EINVAL);
8342 }
8343
8344 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8345 meta->dtm_mops = *mops;
8346 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8347 (void) strcpy(meta->dtm_name, name);
8348 meta->dtm_arg = arg;
8349
8350 mutex_enter(&dtrace_meta_lock);
8351 mutex_enter(&dtrace_lock);
8352
8353 if (dtrace_meta_pid != NULL) {
8354 mutex_exit(&dtrace_lock);
8355 mutex_exit(&dtrace_meta_lock);
8356 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8357 "user-land meta-provider exists", name);
8358 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8359 kmem_free(meta, sizeof (dtrace_meta_t));
8360 return (EINVAL);
8361 }
8362
8363 dtrace_meta_pid = meta;
8364 *idp = (dtrace_meta_provider_id_t)meta;
8365
8366 /*
8367 * If there are providers and probes ready to go, pass them
8368 * off to the new meta provider now.
8369 */
8370
8371 help = dtrace_deferred_pid;
8372 dtrace_deferred_pid = NULL;
8373
8374 mutex_exit(&dtrace_lock);
8375
8376 while (help != NULL) {
8377 for (i = 0; i < help->dthps_nprovs; i++) {
8378 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8379 help->dthps_pid);
8380 }
8381
8382 next = help->dthps_next;
8383 help->dthps_next = NULL;
8384 help->dthps_prev = NULL;
8385 help->dthps_deferred = 0;
8386 help = next;
8387 }
8388
8389 mutex_exit(&dtrace_meta_lock);
8390
8391 return (0);
8392 }
8393
8394 int
8395 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8396 {
8397 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8398
8399 mutex_enter(&dtrace_meta_lock);
8400 mutex_enter(&dtrace_lock);
8401
8402 if (old == dtrace_meta_pid) {
8403 pp = &dtrace_meta_pid;
8404 } else {
8405 panic("attempt to unregister non-existent "
8406 "dtrace meta-provider %p\n", (void *)old);
8407 }
8408
8409 if (old->dtm_count != 0) {
8410 mutex_exit(&dtrace_lock);
8411 mutex_exit(&dtrace_meta_lock);
8412 return (EBUSY);
8413 }
8414
8415 *pp = NULL;
8416
8417 mutex_exit(&dtrace_lock);
8418 mutex_exit(&dtrace_meta_lock);
8419
8420 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8421 kmem_free(old, sizeof (dtrace_meta_t));
8422
8423 return (0);
8424 }
8425
8426
8427 /*
8428 * DTrace DIF Object Functions
8429 */
8430 static int
8431 dtrace_difo_err(uint_t pc, const char *format, ...)
8432 {
8433 if (dtrace_err_verbose) {
8434 va_list alist;
8435
8436 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8437 va_start(alist, format);
8438 (void) vuprintf(format, alist);
8439 va_end(alist);
8440 }
8441
8442 #ifdef DTRACE_ERRDEBUG
8443 dtrace_errdebug(format);
8444 #endif
8445 return (1);
8446 }
8447
8448 /*
8449 * Validate a DTrace DIF object by checking the IR instructions. The following
8450 * rules are currently enforced by dtrace_difo_validate():
8451 *
8452 * 1. Each instruction must have a valid opcode
8453 * 2. Each register, string, variable, or subroutine reference must be valid
8454 * 3. No instruction can modify register %r0 (must be zero)
8455 * 4. All instruction reserved bits must be set to zero
8456 * 5. The last instruction must be a "ret" instruction
8457 * 6. All branch targets must reference a valid instruction _after_ the branch
8458 */
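
/*
 * By way of a minimal example, a DIFO that returns the constant in
 * integer-table slot zero satisfies all of the rules above (shown here
 * in the disassembly style of dtrace -S):
 *
 *	setx	DIF_INTEGER[0], %r1	! %r1 = inttab[0]
 *	ret	%r1			! last instruction is "ret"
 *
 * The same program with %r0 as the setx destination, or with a branch
 * back to instruction 0, would be rejected by the checks below.
 */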
8459 static int
8460 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
8461 cred_t *cr)
8462 {
8463 int err = 0, i;
8464 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8465 int kcheckload;
8466 uint_t pc;
8467
8468 kcheckload = cr == NULL ||
8469 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
8470
8471 dp->dtdo_destructive = 0;
8472
8473 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
8474 dif_instr_t instr = dp->dtdo_buf[pc];
8475
8476 uint_t r1 = DIF_INSTR_R1(instr);
8477 uint_t r2 = DIF_INSTR_R2(instr);
8478 uint_t rd = DIF_INSTR_RD(instr);
8479 uint_t rs = DIF_INSTR_RS(instr);
8480 uint_t label = DIF_INSTR_LABEL(instr);
8481 uint_t v = DIF_INSTR_VAR(instr);
8482 uint_t subr = DIF_INSTR_SUBR(instr);
8483 uint_t type = DIF_INSTR_TYPE(instr);
8484 uint_t op = DIF_INSTR_OP(instr);
8485
8486 switch (op) {
8487 case DIF_OP_OR:
8488 case DIF_OP_XOR:
8489 case DIF_OP_AND:
8490 case DIF_OP_SLL:
8491 case DIF_OP_SRL:
8492 case DIF_OP_SRA:
8493 case DIF_OP_SUB:
8494 case DIF_OP_ADD:
8495 case DIF_OP_MUL:
8496 case DIF_OP_SDIV:
8497 case DIF_OP_UDIV:
8498 case DIF_OP_SREM:
8499 case DIF_OP_UREM:
8500 case DIF_OP_COPYS:
8501 if (r1 >= nregs)
8502 err += efunc(pc, "invalid register %u\n", r1);
8503 if (r2 >= nregs)
8504 err += efunc(pc, "invalid register %u\n", r2);
8505 if (rd >= nregs)
8506 err += efunc(pc, "invalid register %u\n", rd);
8507 if (rd == 0)
8508 err += efunc(pc, "cannot write to %r0\n");
8509 break;
8510 case DIF_OP_NOT:
8511 case DIF_OP_MOV:
8512 case DIF_OP_ALLOCS:
8513 if (r1 >= nregs)
8514 err += efunc(pc, "invalid register %u\n", r1);
8515 if (r2 != 0)
8516 err += efunc(pc, "non-zero reserved bits\n");
8517 if (rd >= nregs)
8518 err += efunc(pc, "invalid register %u\n", rd);
8519 if (rd == 0)
8520 err += efunc(pc, "cannot write to %r0\n");
8521 break;
8522 case DIF_OP_LDSB:
8523 case DIF_OP_LDSH:
8524 case DIF_OP_LDSW:
8525 case DIF_OP_LDUB:
8526 case DIF_OP_LDUH:
8527 case DIF_OP_LDUW:
8528 case DIF_OP_LDX:
8529 if (r1 >= nregs)
8530 err += efunc(pc, "invalid register %u\n", r1);
8531 if (r2 != 0)
8532 err += efunc(pc, "non-zero reserved bits\n");
8533 if (rd >= nregs)
8534 err += efunc(pc, "invalid register %u\n", rd);
8535 if (rd == 0)
8536 err += efunc(pc, "cannot write to %r0\n");
8537 if (kcheckload)
8538 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
8539 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
8540 break;
8541 case DIF_OP_RLDSB:
8542 case DIF_OP_RLDSH:
8543 case DIF_OP_RLDSW:
8544 case DIF_OP_RLDUB:
8545 case DIF_OP_RLDUH:
8546 case DIF_OP_RLDUW:
8547 case DIF_OP_RLDX:
8548 if (r1 >= nregs)
8549 err += efunc(pc, "invalid register %u\n", r1);
8550 if (r2 != 0)
8551 err += efunc(pc, "non-zero reserved bits\n");
8552 if (rd >= nregs)
8553 err += efunc(pc, "invalid register %u\n", rd);
8554 if (rd == 0)
8555 err += efunc(pc, "cannot write to %r0\n");
8556 break;
8557 case DIF_OP_ULDSB:
8558 case DIF_OP_ULDSH:
8559 case DIF_OP_ULDSW:
8560 case DIF_OP_ULDUB:
8561 case DIF_OP_ULDUH:
8562 case DIF_OP_ULDUW:
8563 case DIF_OP_ULDX:
8564 if (r1 >= nregs)
8565 err += efunc(pc, "invalid register %u\n", r1);
8566 if (r2 != 0)
8567 err += efunc(pc, "non-zero reserved bits\n");
8568 if (rd >= nregs)
8569 err += efunc(pc, "invalid register %u\n", rd);
8570 if (rd == 0)
8571 err += efunc(pc, "cannot write to %r0\n");
8572 break;
8573 case DIF_OP_STB:
8574 case DIF_OP_STH:
8575 case DIF_OP_STW:
8576 case DIF_OP_STX:
8577 if (r1 >= nregs)
8578 err += efunc(pc, "invalid register %u\n", r1);
8579 if (r2 != 0)
8580 err += efunc(pc, "non-zero reserved bits\n");
8581 if (rd >= nregs)
8582 err += efunc(pc, "invalid register %u\n", rd);
8583 if (rd == 0)
8584 err += efunc(pc, "cannot write to 0 address\n");
8585 break;
8586 case DIF_OP_CMP:
8587 case DIF_OP_SCMP:
8588 if (r1 >= nregs)
8589 err += efunc(pc, "invalid register %u\n", r1);
8590 if (r2 >= nregs)
8591 err += efunc(pc, "invalid register %u\n", r2);
8592 if (rd != 0)
8593 err += efunc(pc, "non-zero reserved bits\n");
8594 break;
8595 case DIF_OP_TST:
8596 if (r1 >= nregs)
8597 err += efunc(pc, "invalid register %u\n", r1);
8598 if (r2 != 0 || rd != 0)
8599 err += efunc(pc, "non-zero reserved bits\n");
8600 break;
8601 case DIF_OP_BA:
8602 case DIF_OP_BE:
8603 case DIF_OP_BNE:
8604 case DIF_OP_BG:
8605 case DIF_OP_BGU:
8606 case DIF_OP_BGE:
8607 case DIF_OP_BGEU:
8608 case DIF_OP_BL:
8609 case DIF_OP_BLU:
8610 case DIF_OP_BLE:
8611 case DIF_OP_BLEU:
8612 if (label >= dp->dtdo_len) {
8613 err += efunc(pc, "invalid branch target %u\n",
8614 label);
8615 }
8616 if (label <= pc) {
8617 err += efunc(pc, "backward branch to %u\n",
8618 label);
8619 }
8620 break;
8621 case DIF_OP_RET:
8622 if (r1 != 0 || r2 != 0)
8623 err += efunc(pc, "non-zero reserved bits\n");
8624 if (rd >= nregs)
8625 err += efunc(pc, "invalid register %u\n", rd);
8626 break;
8627 case DIF_OP_NOP:
8628 case DIF_OP_POPTS:
8629 case DIF_OP_FLUSHTS:
8630 if (r1 != 0 || r2 != 0 || rd != 0)
8631 err += efunc(pc, "non-zero reserved bits\n");
8632 break;
8633 case DIF_OP_SETX:
8634 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8635 err += efunc(pc, "invalid integer ref %u\n",
8636 DIF_INSTR_INTEGER(instr));
8637 }
8638 if (rd >= nregs)
8639 err += efunc(pc, "invalid register %u\n", rd);
8640 if (rd == 0)
8641 err += efunc(pc, "cannot write to %r0\n");
8642 break;
8643 case DIF_OP_SETS:
8644 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8645 err += efunc(pc, "invalid string ref %u\n",
8646 DIF_INSTR_STRING(instr));
8647 }
8648 if (rd >= nregs)
8649 err += efunc(pc, "invalid register %u\n", rd);
8650 if (rd == 0)
8651 err += efunc(pc, "cannot write to %r0\n");
8652 break;
8653 case DIF_OP_LDGA:
8654 case DIF_OP_LDTA:
8655 if (r1 > DIF_VAR_ARRAY_MAX)
8656 err += efunc(pc, "invalid array %u\n", r1);
8657 if (r2 >= nregs)
8658 err += efunc(pc, "invalid register %u\n", r2);
8659 if (rd >= nregs)
8660 err += efunc(pc, "invalid register %u\n", rd);
8661 if (rd == 0)
8662 err += efunc(pc, "cannot write to %r0\n");
8663 break;
8664 case DIF_OP_LDGS:
8665 case DIF_OP_LDTS:
8666 case DIF_OP_LDLS:
8667 case DIF_OP_LDGAA:
8668 case DIF_OP_LDTAA:
8669 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8670 err += efunc(pc, "invalid variable %u\n", v);
8671 if (rd >= nregs)
8672 err += efunc(pc, "invalid register %u\n", rd);
8673 if (rd == 0)
8674 err += efunc(pc, "cannot write to %r0\n");
8675 break;
8676 case DIF_OP_STGS:
8677 case DIF_OP_STTS:
8678 case DIF_OP_STLS:
8679 case DIF_OP_STGAA:
8680 case DIF_OP_STTAA:
8681 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8682 err += efunc(pc, "invalid variable %u\n", v);
8683 if (rs >= nregs)
8684 err += efunc(pc, "invalid register %u\n", rs);
8685 break;
8686 case DIF_OP_CALL:
8687 if (subr > DIF_SUBR_MAX)
8688 err += efunc(pc, "invalid subr %u\n", subr);
8689 if (rd >= nregs)
8690 err += efunc(pc, "invalid register %u\n", rd);
8691 if (rd == 0)
8692 err += efunc(pc, "cannot write to %r0\n");
8693
8694 if (subr == DIF_SUBR_COPYOUT ||
8695 subr == DIF_SUBR_COPYOUTSTR) {
8696 dp->dtdo_destructive = 1;
8697 }
8698 break;
8699 case DIF_OP_PUSHTR:
8700 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
8701 err += efunc(pc, "invalid ref type %u\n", type);
8702 if (r2 >= nregs)
8703 err += efunc(pc, "invalid register %u\n", r2);
8704 if (rs >= nregs)
8705 err += efunc(pc, "invalid register %u\n", rs);
8706 break;
8707 case DIF_OP_PUSHTV:
8708 if (type != DIF_TYPE_CTF)
8709 err += efunc(pc, "invalid val type %u\n", type);
8710 if (r2 >= nregs)
8711 err += efunc(pc, "invalid register %u\n", r2);
8712 if (rs >= nregs)
8713 err += efunc(pc, "invalid register %u\n", rs);
8714 break;
8715 default:
8716 err += efunc(pc, "invalid opcode %u\n",
8717 DIF_INSTR_OP(instr));
8718 }
8719 }
8720
8721 if (dp->dtdo_len != 0 &&
8722 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
8723 err += efunc(dp->dtdo_len - 1,
8724 "expected 'ret' as last DIF instruction\n");
8725 }
8726
8727 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
8728 /*
8729 * If we're not returning by reference, the size must be either
8730 * 0 or the size of one of the base types.
8731 */
8732 switch (dp->dtdo_rtype.dtdt_size) {
8733 case 0:
8734 case sizeof (uint8_t):
8735 case sizeof (uint16_t):
8736 case sizeof (uint32_t):
8737 case sizeof (uint64_t):
8738 break;
8739
8740 default:
8741 err += efunc(dp->dtdo_len - 1, "bad return size\n");
8742 }
8743 }
8744
8745 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
8746 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
8747 dtrace_diftype_t *vt, *et;
8748 uint_t id, ndx;
8749
8750 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
8751 v->dtdv_scope != DIFV_SCOPE_THREAD &&
8752 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
8753 err += efunc(i, "unrecognized variable scope %d\n",
8754 v->dtdv_scope);
8755 break;
8756 }
8757
8758 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
8759 v->dtdv_kind != DIFV_KIND_SCALAR) {
8760 err += efunc(i, "unrecognized variable type %d\n",
8761 v->dtdv_kind);
8762 break;
8763 }
8764
8765 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8766 err += efunc(i, "%d exceeds variable id limit\n", id);
8767 break;
8768 }
8769
8770 if (id < DIF_VAR_OTHER_UBASE)
8771 continue;
8772
8773 /*
8774 * For user-defined variables, we need to check that this
8775 * definition is identical to any previous definition that we
8776 * encountered.
8777 */
8778 ndx = id - DIF_VAR_OTHER_UBASE;
8779
8780 switch (v->dtdv_scope) {
8781 case DIFV_SCOPE_GLOBAL:
8782 if (ndx < vstate->dtvs_nglobals) {
8783 dtrace_statvar_t *svar;
8784
8785 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8786 existing = &svar->dtsv_var;
8787 }
8788
8789 break;
8790
8791 case DIFV_SCOPE_THREAD:
8792 if (ndx < vstate->dtvs_ntlocals)
8793 existing = &vstate->dtvs_tlocals[ndx];
8794 break;
8795
8796 case DIFV_SCOPE_LOCAL:
8797 if (ndx < vstate->dtvs_nlocals) {
8798 dtrace_statvar_t *svar;
8799
8800 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8801 existing = &svar->dtsv_var;
8802 }
8803
8804 break;
8805 }
8806
8807 vt = &v->dtdv_type;
8808
8809 if (vt->dtdt_flags & DIF_TF_BYREF) {
8810 if (vt->dtdt_size == 0) {
8811 err += efunc(i, "zero-sized variable\n");
8812 break;
8813 }
8814
8815 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8816 vt->dtdt_size > dtrace_global_maxsize) {
8817 err += efunc(i, "oversized by-ref global\n");
8818 break;
8819 }
8820 }
8821
8822 if (existing == NULL || existing->dtdv_id == 0)
8823 continue;
8824
8825 ASSERT(existing->dtdv_id == v->dtdv_id);
8826 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8827
8828 if (existing->dtdv_kind != v->dtdv_kind)
8829 err += efunc(i, "%d changed variable kind\n", id);
8830
8831 et = &existing->dtdv_type;
8832
8833 if (vt->dtdt_flags != et->dtdt_flags) {
8834 err += efunc(i, "%d changed variable type flags\n", id);
8835 break;
8836 }
8837
8838 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8839 err += efunc(i, "%d changed variable type size\n", id);
8840 break;
8841 }
8842 }
8843
8844 return (err);
8845 }
8846
8847 #if defined(sun)
8848 /*
8849 * Validate a DTrace DIF object that is to be used as a helper. Helpers
8850 * are much more constrained than normal DIFOs. Specifically, they may
8851 * not:
8852 *
8853 * 1. Make calls to subroutines other than copyin(), copyinstr(), or
8854 * miscellaneous string routines.
8855 * 2. Access DTrace variables other than the args[] array, and the
8856 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
8857 * 3. Have thread-local variables.
8858 * 4. Have dynamic variables.
8859 */
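
/*
 * For example, a helper action that merely copies in and inspects a
 * string argument -- this->s = copyinstr(arg0) -- is acceptable,
 * whereas one that stores to a thread-local variable (self->x = arg0)
 * or calls a destructive subroutine is rejected by the checks below.
 */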
8860 static int
8861 dtrace_difo_validate_helper(dtrace_difo_t *dp)
8862 {
8863 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8864 int err = 0;
8865 uint_t pc;
8866
8867 for (pc = 0; pc < dp->dtdo_len; pc++) {
8868 dif_instr_t instr = dp->dtdo_buf[pc];
8869
8870 uint_t v = DIF_INSTR_VAR(instr);
8871 uint_t subr = DIF_INSTR_SUBR(instr);
8872 uint_t op = DIF_INSTR_OP(instr);
8873
8874 switch (op) {
8875 case DIF_OP_OR:
8876 case DIF_OP_XOR:
8877 case DIF_OP_AND:
8878 case DIF_OP_SLL:
8879 case DIF_OP_SRL:
8880 case DIF_OP_SRA:
8881 case DIF_OP_SUB:
8882 case DIF_OP_ADD:
8883 case DIF_OP_MUL:
8884 case DIF_OP_SDIV:
8885 case DIF_OP_UDIV:
8886 case DIF_OP_SREM:
8887 case DIF_OP_UREM:
8888 case DIF_OP_COPYS:
8889 case DIF_OP_NOT:
8890 case DIF_OP_MOV:
8891 case DIF_OP_RLDSB:
8892 case DIF_OP_RLDSH:
8893 case DIF_OP_RLDSW:
8894 case DIF_OP_RLDUB:
8895 case DIF_OP_RLDUH:
8896 case DIF_OP_RLDUW:
8897 case DIF_OP_RLDX:
8898 case DIF_OP_ULDSB:
8899 case DIF_OP_ULDSH:
8900 case DIF_OP_ULDSW:
8901 case DIF_OP_ULDUB:
8902 case DIF_OP_ULDUH:
8903 case DIF_OP_ULDUW:
8904 case DIF_OP_ULDX:
8905 case DIF_OP_STB:
8906 case DIF_OP_STH:
8907 case DIF_OP_STW:
8908 case DIF_OP_STX:
8909 case DIF_OP_ALLOCS:
8910 case DIF_OP_CMP:
8911 case DIF_OP_SCMP:
8912 case DIF_OP_TST:
8913 case DIF_OP_BA:
8914 case DIF_OP_BE:
8915 case DIF_OP_BNE:
8916 case DIF_OP_BG:
8917 case DIF_OP_BGU:
8918 case DIF_OP_BGE:
8919 case DIF_OP_BGEU:
8920 case DIF_OP_BL:
8921 case DIF_OP_BLU:
8922 case DIF_OP_BLE:
8923 case DIF_OP_BLEU:
8924 case DIF_OP_RET:
8925 case DIF_OP_NOP:
8926 case DIF_OP_POPTS:
8927 case DIF_OP_FLUSHTS:
8928 case DIF_OP_SETX:
8929 case DIF_OP_SETS:
8930 case DIF_OP_LDGA:
8931 case DIF_OP_LDLS:
8932 case DIF_OP_STGS:
8933 case DIF_OP_STLS:
8934 case DIF_OP_PUSHTR:
8935 case DIF_OP_PUSHTV:
8936 break;
8937
8938 case DIF_OP_LDGS:
8939 if (v >= DIF_VAR_OTHER_UBASE)
8940 break;
8941
8942 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
8943 break;
8944
8945 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
8946 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
8947 v == DIF_VAR_EXECARGS ||
8948 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
8949 v == DIF_VAR_UID || v == DIF_VAR_GID)
8950 break;
8951
8952 err += efunc(pc, "illegal variable %u\n", v);
8953 break;
8954
8955 case DIF_OP_LDTA:
8956 case DIF_OP_LDTS:
8957 case DIF_OP_LDGAA:
8958 case DIF_OP_LDTAA:
8959 err += efunc(pc, "illegal dynamic variable load\n");
8960 break;
8961
8962 case DIF_OP_STTS:
8963 case DIF_OP_STGAA:
8964 case DIF_OP_STTAA:
8965 err += efunc(pc, "illegal dynamic variable store\n");
8966 break;
8967
8968 case DIF_OP_CALL:
8969 if (subr == DIF_SUBR_ALLOCA ||
8970 subr == DIF_SUBR_BCOPY ||
8971 subr == DIF_SUBR_COPYIN ||
8972 subr == DIF_SUBR_COPYINTO ||
8973 subr == DIF_SUBR_COPYINSTR ||
8974 subr == DIF_SUBR_INDEX ||
8975 subr == DIF_SUBR_INET_NTOA ||
8976 subr == DIF_SUBR_INET_NTOA6 ||
8977 subr == DIF_SUBR_INET_NTOP ||
8978 subr == DIF_SUBR_LLTOSTR ||
8979 subr == DIF_SUBR_RINDEX ||
8980 subr == DIF_SUBR_STRCHR ||
8981 subr == DIF_SUBR_STRJOIN ||
8982 subr == DIF_SUBR_STRRCHR ||
8983 subr == DIF_SUBR_STRSTR ||
8984 subr == DIF_SUBR_HTONS ||
8985 subr == DIF_SUBR_HTONL ||
8986 subr == DIF_SUBR_HTONLL ||
8987 subr == DIF_SUBR_NTOHS ||
8988 subr == DIF_SUBR_NTOHL ||
8989 subr == DIF_SUBR_NTOHLL ||
8990 subr == DIF_SUBR_MEMREF ||
8991 subr == DIF_SUBR_TYPEREF)
8992 break;
8993
8994 err += efunc(pc, "invalid subr %u\n", subr);
8995 break;
8996
8997 default:
8998 err += efunc(pc, "invalid opcode %u\n",
8999 DIF_INSTR_OP(instr));
9000 }
9001 }
9002
9003 return (err);
9004 }
9005 #endif
9006
9007 /*
9008 * Returns 1 if the expression in the DIF object can be cached on a per-thread
9009 * basis; 0 if not.
9010 */
9011 static int
9012 dtrace_difo_cacheable(dtrace_difo_t *dp)
9013 {
9014 int i;
9015
9016 if (dp == NULL)
9017 return (0);
9018
9019 for (i = 0; i < dp->dtdo_varlen; i++) {
9020 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9021
9022 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
9023 continue;
9024
9025 switch (v->dtdv_id) {
9026 case DIF_VAR_CURTHREAD:
9027 case DIF_VAR_PID:
9028 case DIF_VAR_TID:
9029 case DIF_VAR_EXECARGS:
9030 case DIF_VAR_EXECNAME:
9031 case DIF_VAR_ZONENAME:
9032 break;
9033
9034 default:
9035 return (0);
9036 }
9037 }
9038
9039 /*
9040 * This DIF object may be cacheable. Now we need to look for any
9041 * array loading instructions, any memory loading instructions, or
9042 * any stores to thread-local variables.
9043 */
9044 for (i = 0; i < dp->dtdo_len; i++) {
9045 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
9046
9047 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
9048 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
9049 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
9050 op == DIF_OP_LDGA || op == DIF_OP_STTS)
9051 return (0);
9052 }
9053
9054 return (1);
9055 }
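
/*
 * For example, the predicate /pid == 1234/ compiles to a DIFO that
 * references only DIF_VAR_PID and is therefore cacheable: its value
 * cannot change over the lifetime of a given thread, so a thread that
 * has evaluated it once can cache the result against the predicate's
 * cache ID.  A predicate such as /arg0 != 0/, by contrast, depends on
 * probe arguments and must be evaluated at every firing.
 */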
9056
9057 static void
9058 dtrace_difo_hold(dtrace_difo_t *dp)
9059 {
9060 int i;
9061
9062 ASSERT(MUTEX_HELD(&dtrace_lock));
9063
9064 dp->dtdo_refcnt++;
9065 ASSERT(dp->dtdo_refcnt != 0);
9066
9067 /*
9068 * We need to check this DIF object for references to the variable
9069 * DIF_VAR_VTIMESTAMP.
9070 */
9071 for (i = 0; i < dp->dtdo_varlen; i++) {
9072 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9073
9074 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9075 continue;
9076
9077 if (dtrace_vtime_references++ == 0)
9078 dtrace_vtime_enable();
9079 }
9080 }
9081
9082 /*
9083 * This routine calculates the dynamic variable chunksize for a given DIF
9084 * object. The calculation is not fool-proof, and can probably be tricked by
9085 * malicious DIF -- but it works for all compiler-generated DIF. Because this
9086 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
9087 * if a dynamic variable size exceeds the chunksize.
9088 */
9089 static void
9090 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9091 {
9092 uint64_t sval = 0;
9093 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
9094 const dif_instr_t *text = dp->dtdo_buf;
9095 uint_t pc, srd = 0;
9096 uint_t ttop = 0;
9097 size_t size, ksize;
9098 uint_t id, i;
9099
9100 for (pc = 0; pc < dp->dtdo_len; pc++) {
9101 dif_instr_t instr = text[pc];
9102 uint_t op = DIF_INSTR_OP(instr);
9103 uint_t rd = DIF_INSTR_RD(instr);
9104 uint_t r1 = DIF_INSTR_R1(instr);
9105 uint_t nkeys = 0;
9106 uchar_t scope = 0;
9107
9108 dtrace_key_t *key = tupregs;
9109
9110 switch (op) {
9111 case DIF_OP_SETX:
9112 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
9113 srd = rd;
9114 continue;
9115
9116 case DIF_OP_STTS:
9117 key = &tupregs[DIF_DTR_NREGS];
9118 key[0].dttk_size = 0;
9119 key[1].dttk_size = 0;
9120 nkeys = 2;
9121 scope = DIFV_SCOPE_THREAD;
9122 break;
9123
9124 case DIF_OP_STGAA:
9125 case DIF_OP_STTAA:
9126 nkeys = ttop;
9127
9128 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9129 key[nkeys++].dttk_size = 0;
9130
9131 key[nkeys++].dttk_size = 0;
9132
9133 if (op == DIF_OP_STTAA) {
9134 scope = DIFV_SCOPE_THREAD;
9135 } else {
9136 scope = DIFV_SCOPE_GLOBAL;
9137 }
9138
9139 break;
9140
9141 case DIF_OP_PUSHTR:
9142 if (ttop == DIF_DTR_NREGS)
9143 return;
9144
9145 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9146 /*
9147 * If the register for the size of the "pushtr"
9148 * is %r0 (or the value is 0) and the type is
9149 * a string, we'll use the system-wide default
9150 * string size.
9151 */
9152 tupregs[ttop++].dttk_size =
9153 dtrace_strsize_default;
9154 } else {
9155 if (srd == 0)
9156 return;
9157
9158 tupregs[ttop++].dttk_size = sval;
9159 }
9160
9161 break;
9162
9163 case DIF_OP_PUSHTV:
9164 if (ttop == DIF_DTR_NREGS)
9165 return;
9166
9167 tupregs[ttop++].dttk_size = 0;
9168 break;
9169
9170 case DIF_OP_FLUSHTS:
9171 ttop = 0;
9172 break;
9173
9174 case DIF_OP_POPTS:
9175 if (ttop != 0)
9176 ttop--;
9177 break;
9178 }
9179
9180 sval = 0;
9181 srd = 0;
9182
9183 if (nkeys == 0)
9184 continue;
9185
9186 /*
9187 * We have a dynamic variable allocation; calculate its size.
9188 */
9189 for (ksize = 0, i = 0; i < nkeys; i++)
9190 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
9191
9192 size = sizeof (dtrace_dynvar_t);
9193 size += sizeof (dtrace_key_t) * (nkeys - 1);
9194 size += ksize;
9195
9196 /*
9197 * Now we need to determine the size of the stored data.
9198 */
9199 id = DIF_INSTR_VAR(instr);
9200
9201 for (i = 0; i < dp->dtdo_varlen; i++) {
9202 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9203
9204 if (v->dtdv_id == id && v->dtdv_scope == scope) {
9205 size += v->dtdv_type.dtdt_size;
9206 break;
9207 }
9208 }
9209
9210 if (i == dp->dtdo_varlen)
9211 return;
9212
9213 /*
9214 * We have the size. If this is larger than the chunk size
9215 * for our dynamic variable state, reset the chunk size.
9216 */
9217 size = P2ROUNDUP(size, sizeof (uint64_t));
9218
9219 if (size > vstate->dtvs_dynvars.dtds_chunksize)
9220 vstate->dtvs_dynvars.dtds_chunksize = size;
9221 }
9222 }
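
/*
 * As a worked example, a clause that stores to a global associative
 * array with a string key -- a["foo"] = x -- pushes one string-typed
 * tuple key.  If the size register cannot be statically determined,
 * the key is assumed to be dtrace_strsize_default bytes (256 by
 * default); the chunk must then accommodate the dtrace_dynvar_t
 * header, one dtrace_key_t per key, the rounded-up key data, and the
 * size of the stored value, with the total rounded up to 8-byte
 * alignment.  The largest such total across all DIFOs in the state
 * becomes dtds_chunksize.
 */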
9223
9224 static void
9225 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9226 {
9227 int i, oldsvars, osz, nsz, otlocals, ntlocals;
9228 uint_t id;
9229
9230 ASSERT(MUTEX_HELD(&dtrace_lock));
9231 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
9232
9233 for (i = 0; i < dp->dtdo_varlen; i++) {
9234 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9235 dtrace_statvar_t *svar, ***svarp = NULL;
9236 size_t dsize = 0;
9237 uint8_t scope = v->dtdv_scope;
9238 int *np = NULL;
9239
9240 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9241 continue;
9242
9243 id -= DIF_VAR_OTHER_UBASE;
9244
9245 switch (scope) {
9246 case DIFV_SCOPE_THREAD:
9247 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
9248 dtrace_difv_t *tlocals;
9249
9250 if ((ntlocals = (otlocals << 1)) == 0)
9251 ntlocals = 1;
9252
9253 osz = otlocals * sizeof (dtrace_difv_t);
9254 nsz = ntlocals * sizeof (dtrace_difv_t);
9255
9256 tlocals = kmem_zalloc(nsz, KM_SLEEP);
9257
9258 if (osz != 0) {
9259 bcopy(vstate->dtvs_tlocals,
9260 tlocals, osz);
9261 kmem_free(vstate->dtvs_tlocals, osz);
9262 }
9263
9264 vstate->dtvs_tlocals = tlocals;
9265 vstate->dtvs_ntlocals = ntlocals;
9266 }
9267
9268 vstate->dtvs_tlocals[id] = *v;
9269 continue;
9270
9271 case DIFV_SCOPE_LOCAL:
9272 np = &vstate->dtvs_nlocals;
9273 svarp = &vstate->dtvs_locals;
9274
9275 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9276 dsize = NCPU * (v->dtdv_type.dtdt_size +
9277 sizeof (uint64_t));
9278 else
9279 dsize = NCPU * sizeof (uint64_t);
9280
9281 break;
9282
9283 case DIFV_SCOPE_GLOBAL:
9284 np = &vstate->dtvs_nglobals;
9285 svarp = &vstate->dtvs_globals;
9286
9287 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9288 dsize = v->dtdv_type.dtdt_size +
9289 sizeof (uint64_t);
9290
9291 break;
9292
9293 default:
9294 ASSERT(0);
9295 }
9296
9297 while (id >= (oldsvars = *np)) {
9298 dtrace_statvar_t **statics;
9299 int newsvars, oldsize, newsize;
9300
9301 if ((newsvars = (oldsvars << 1)) == 0)
9302 newsvars = 1;
9303
9304 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
9305 newsize = newsvars * sizeof (dtrace_statvar_t *);
9306
9307 statics = kmem_zalloc(newsize, KM_SLEEP);
9308
9309 if (oldsize != 0) {
9310 bcopy(*svarp, statics, oldsize);
9311 kmem_free(*svarp, oldsize);
9312 }
9313
9314 *svarp = statics;
9315 *np = newsvars;
9316 }
9317
9318 if ((svar = (*svarp)[id]) == NULL) {
9319 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
9320 svar->dtsv_var = *v;
9321
9322 if ((svar->dtsv_size = dsize) != 0) {
9323 svar->dtsv_data = (uint64_t)(uintptr_t)
9324 kmem_zalloc(dsize, KM_SLEEP);
9325 }
9326
9327 (*svarp)[id] = svar;
9328 }
9329
9330 svar->dtsv_refcnt++;
9331 }
9332
9333 dtrace_difo_chunksize(dp, vstate);
9334 dtrace_difo_hold(dp);
9335 }
9336
9337 #if defined(sun)
9338 static dtrace_difo_t *
9339 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9340 {
9341 dtrace_difo_t *new;
9342 size_t sz;
9343
9344 ASSERT(dp->dtdo_buf != NULL);
9345 ASSERT(dp->dtdo_refcnt != 0);
9346
9347 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
9348
9349 ASSERT(dp->dtdo_buf != NULL);
9350 sz = dp->dtdo_len * sizeof (dif_instr_t);
9351 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
9352 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
9353 new->dtdo_len = dp->dtdo_len;
9354
9355 if (dp->dtdo_strtab != NULL) {
9356 ASSERT(dp->dtdo_strlen != 0);
9357 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
9358 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
9359 new->dtdo_strlen = dp->dtdo_strlen;
9360 }
9361
9362 if (dp->dtdo_inttab != NULL) {
9363 ASSERT(dp->dtdo_intlen != 0);
9364 sz = dp->dtdo_intlen * sizeof (uint64_t);
9365 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
9366 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
9367 new->dtdo_intlen = dp->dtdo_intlen;
9368 }
9369
9370 if (dp->dtdo_vartab != NULL) {
9371 ASSERT(dp->dtdo_varlen != 0);
9372 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
9373 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
9374 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
9375 new->dtdo_varlen = dp->dtdo_varlen;
9376 }
9377
9378 dtrace_difo_init(new, vstate);
9379 return (new);
9380 }
9381 #endif
9382
9383 static void
9384 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9385 {
9386 int i;
9387
9388 ASSERT(dp->dtdo_refcnt == 0);
9389
9390 for (i = 0; i < dp->dtdo_varlen; i++) {
9391 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9392 dtrace_statvar_t *svar, **svarp = NULL;
9393 uint_t id;
9394 uint8_t scope = v->dtdv_scope;
9395 int *np = NULL;
9396
9397 switch (scope) {
9398 case DIFV_SCOPE_THREAD:
9399 continue;
9400
9401 case DIFV_SCOPE_LOCAL:
9402 np = &vstate->dtvs_nlocals;
9403 svarp = vstate->dtvs_locals;
9404 break;
9405
9406 case DIFV_SCOPE_GLOBAL:
9407 np = &vstate->dtvs_nglobals;
9408 svarp = vstate->dtvs_globals;
9409 break;
9410
9411 default:
9412 ASSERT(0);
9413 }
9414
9415 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9416 continue;
9417
9418 id -= DIF_VAR_OTHER_UBASE;
9419 ASSERT(id < *np);
9420
9421 svar = svarp[id];
9422 ASSERT(svar != NULL);
9423 ASSERT(svar->dtsv_refcnt > 0);
9424
9425 if (--svar->dtsv_refcnt > 0)
9426 continue;
9427
9428 if (svar->dtsv_size != 0) {
9429 ASSERT(svar->dtsv_data != 0);
9430 kmem_free((void *)(uintptr_t)svar->dtsv_data,
9431 svar->dtsv_size);
9432 }
9433
9434 kmem_free(svar, sizeof (dtrace_statvar_t));
9435 svarp[id] = NULL;
9436 }
9437
9438 if (dp->dtdo_buf != NULL)
9439 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
9440 if (dp->dtdo_inttab != NULL)
9441 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
9442 if (dp->dtdo_strtab != NULL)
9443 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
9444 if (dp->dtdo_vartab != NULL)
9445 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
9446
9447 kmem_free(dp, sizeof (dtrace_difo_t));
9448 }
9449
9450 static void
9451 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9452 {
9453 int i;
9454
9455 ASSERT(MUTEX_HELD(&dtrace_lock));
9456 ASSERT(dp->dtdo_refcnt != 0);
9457
9458 for (i = 0; i < dp->dtdo_varlen; i++) {
9459 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9460
9461 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9462 continue;
9463
9464 ASSERT(dtrace_vtime_references > 0);
9465 if (--dtrace_vtime_references == 0)
9466 dtrace_vtime_disable();
9467 }
9468
9469 if (--dp->dtdo_refcnt == 0)
9470 dtrace_difo_destroy(dp, vstate);
9471 }
9472
9473 /*
9474 * DTrace Format Functions
9475 */
9476 static uint16_t
9477 dtrace_format_add(dtrace_state_t *state, char *str)
9478 {
9479 char *fmt, **new;
9480 uint16_t ndx, len = strlen(str) + 1;
9481
9482 fmt = kmem_zalloc(len, KM_SLEEP);
9483 bcopy(str, fmt, len);
9484
9485 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
9486 if (state->dts_formats[ndx] == NULL) {
9487 state->dts_formats[ndx] = fmt;
9488 return (ndx + 1);
9489 }
9490 }
9491
9492 if (state->dts_nformats == USHRT_MAX) {
9493 /*
9494 * This is only likely if a denial-of-service attack is being
9495 * attempted. As such, it's okay to fail silently here.
9496 */
9497 kmem_free(fmt, len);
9498 return (0);
9499 }
9500
9501 /*
9502 * For simplicity, we always resize the formats array to be exactly the
9503 * number of formats.
9504 */
9505 ndx = state->dts_nformats++;
9506 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
9507
9508 if (state->dts_formats != NULL) {
9509 ASSERT(ndx != 0);
9510 bcopy(state->dts_formats, new, ndx * sizeof (char *));
9511 kmem_free(state->dts_formats, ndx * sizeof (char *));
9512 }
9513
9514 state->dts_formats = new;
9515 state->dts_formats[ndx] = fmt;
9516
9517 return (ndx + 1);
9518 }
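
/*
 * Note that format indices returned to callers are 1-based: zero is
 * reserved to indicate that no format could be allocated, and
 * consumers of the index (dtrace_format_remove(), for instance)
 * compensate by subtracting one when indexing dts_formats.
 */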
9519
9520 static void
9521 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
9522 {
9523 char *fmt;
9524
9525 ASSERT(state->dts_formats != NULL);
9526 ASSERT(format <= state->dts_nformats);
9527 ASSERT(state->dts_formats[format - 1] != NULL);
9528
9529 fmt = state->dts_formats[format - 1];
9530 kmem_free(fmt, strlen(fmt) + 1);
9531 state->dts_formats[format - 1] = NULL;
9532 }
9533
9534 static void
9535 dtrace_format_destroy(dtrace_state_t *state)
9536 {
9537 int i;
9538
9539 if (state->dts_nformats == 0) {
9540 ASSERT(state->dts_formats == NULL);
9541 return;
9542 }
9543
9544 ASSERT(state->dts_formats != NULL);
9545
9546 for (i = 0; i < state->dts_nformats; i++) {
9547 char *fmt = state->dts_formats[i];
9548
9549 if (fmt == NULL)
9550 continue;
9551
9552 kmem_free(fmt, strlen(fmt) + 1);
9553 }
9554
9555 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
9556 state->dts_nformats = 0;
9557 state->dts_formats = NULL;
9558 }
9559
9560 /*
9561 * DTrace Predicate Functions
9562 */
9563 static dtrace_predicate_t *
9564 dtrace_predicate_create(dtrace_difo_t *dp)
9565 {
9566 dtrace_predicate_t *pred;
9567
9568 ASSERT(MUTEX_HELD(&dtrace_lock));
9569 ASSERT(dp->dtdo_refcnt != 0);
9570
9571 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
9572 pred->dtp_difo = dp;
9573 pred->dtp_refcnt = 1;
9574
9575 if (!dtrace_difo_cacheable(dp))
9576 return (pred);
9577
9578 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
9579 /*
9580 * This is only theoretically possible -- we have had 2^32
9581 * cacheable predicates on this machine. We cannot allow any
9582 * more predicates to become cacheable: as unlikely as it is,
9583 * there may be a thread caching a (now stale) predicate cache
9584 * ID. (N.B.: the temptation is being successfully resisted to
9585 * have this cmn_err() "Holy shit -- we executed this code!")
9586 */
9587 return (pred);
9588 }
9589
9590 pred->dtp_cacheid = dtrace_predcache_id++;
9591
9592 return (pred);
9593 }
9594
9595 static void
9596 dtrace_predicate_hold(dtrace_predicate_t *pred)
9597 {
9598 ASSERT(MUTEX_HELD(&dtrace_lock));
9599 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
9600 ASSERT(pred->dtp_refcnt > 0);
9601
9602 pred->dtp_refcnt++;
9603 }
9604
9605 static void
9606 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
9607 {
9608 dtrace_difo_t *dp = pred->dtp_difo;
9609
9610 ASSERT(MUTEX_HELD(&dtrace_lock));
9611 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
9612 ASSERT(pred->dtp_refcnt > 0);
9613
9614 if (--pred->dtp_refcnt == 0) {
9615 dtrace_difo_release(pred->dtp_difo, vstate);
9616 kmem_free(pred, sizeof (dtrace_predicate_t));
9617 }
9618 }
9619
9620 /*
9621 * DTrace Action Description Functions
9622 */
9623 static dtrace_actdesc_t *
9624 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
9625 uint64_t uarg, uint64_t arg)
9626 {
9627 dtrace_actdesc_t *act;
9628
9629 #if defined(sun)
9630 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
9631 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
9632 #endif
9633
9634 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
9635 act->dtad_kind = kind;
9636 act->dtad_ntuple = ntuple;
9637 act->dtad_uarg = uarg;
9638 act->dtad_arg = arg;
9639 act->dtad_refcnt = 1;
9640
9641 return (act);
9642 }
9643
9644 static void
9645 dtrace_actdesc_hold(dtrace_actdesc_t *act)
9646 {
9647 ASSERT(act->dtad_refcnt >= 1);
9648 act->dtad_refcnt++;
9649 }
9650
9651 static void
9652 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
9653 {
9654 dtrace_actkind_t kind = act->dtad_kind;
9655 dtrace_difo_t *dp;
9656
9657 ASSERT(act->dtad_refcnt >= 1);
9658
9659 if (--act->dtad_refcnt != 0)
9660 return;
9661
9662 if ((dp = act->dtad_difo) != NULL)
9663 dtrace_difo_release(dp, vstate);
9664
9665 if (DTRACEACT_ISPRINTFLIKE(kind)) {
9666 char *str = (char *)(uintptr_t)act->dtad_arg;
9667
9668 #if defined(sun)
9669 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
9670 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
9671 #endif
9672
9673 if (str != NULL)
9674 kmem_free(str, strlen(str) + 1);
9675 }
9676
9677 kmem_free(act, sizeof (dtrace_actdesc_t));
9678 }
9679
9680 /*
9681 * DTrace ECB Functions
9682 */
9683 static dtrace_ecb_t *
9684 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
9685 {
9686 dtrace_ecb_t *ecb;
9687 dtrace_epid_t epid;
9688
9689 ASSERT(MUTEX_HELD(&dtrace_lock));
9690
9691 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
9692 ecb->dte_predicate = NULL;
9693 ecb->dte_probe = probe;
9694
9695 /*
9696 * The default size is the size of the default action: recording
9697 * the epid.
9698 */
9699 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9700 ecb->dte_alignment = sizeof (dtrace_epid_t);
9701
9702 epid = state->dts_epid++;
9703
9704 if (epid - 1 >= state->dts_necbs) {
9705 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
9706 int necbs = state->dts_necbs << 1;
9707
9708 ASSERT(epid == state->dts_necbs + 1);
9709
9710 if (necbs == 0) {
9711 ASSERT(oecbs == NULL);
9712 necbs = 1;
9713 }
9714
9715 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
9716
9717 if (oecbs != NULL)
9718 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
9719
9720 dtrace_membar_producer();
9721 state->dts_ecbs = ecbs;
9722
9723 if (oecbs != NULL) {
9724 /*
9725 * If this state is active, we must dtrace_sync()
9726 * before we can free the old dts_ecbs array: we're
9727 * coming in hot, and there may be active ring
9728 * buffer processing (which indexes into the dts_ecbs
9729 * array) on another CPU.
9730 */
9731 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
9732 dtrace_sync();
9733
9734 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
9735 }
9736
9737 dtrace_membar_producer();
9738 state->dts_necbs = necbs;
9739 }
9740
9741 ecb->dte_state = state;
9742
9743 ASSERT(state->dts_ecbs[epid - 1] == NULL);
9744 dtrace_membar_producer();
9745 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
9746
9747 return (ecb);
9748 }
9749
9750 static int
9751 dtrace_ecb_enable(dtrace_ecb_t *ecb)
9752 {
9753 dtrace_probe_t *probe = ecb->dte_probe;
9754
9755 ASSERT(MUTEX_HELD(&cpu_lock));
9756 ASSERT(MUTEX_HELD(&dtrace_lock));
9757 ASSERT(ecb->dte_next == NULL);
9758
9759 if (probe == NULL) {
9760 /*
9761 * This is the NULL probe -- there's nothing to do.
9762 */
9763 return (0);
9764 }
9765
9766 if (probe->dtpr_ecb == NULL) {
9767 dtrace_provider_t *prov = probe->dtpr_provider;
9768
9769 /*
9770 * We're the first ECB on this probe.
9771 */
9772 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
9773
9774 if (ecb->dte_predicate != NULL)
9775 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
9776
9777 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
9778 probe->dtpr_id, probe->dtpr_arg));
9779 } else {
9780 /*
9781 * This probe is already active. Swing the last pointer to
9782 * point to the new ECB, and issue a dtrace_sync() to assure
9783 * that all CPUs have seen the change.
9784 */
9785 ASSERT(probe->dtpr_ecb_last != NULL);
9786 probe->dtpr_ecb_last->dte_next = ecb;
9787 probe->dtpr_ecb_last = ecb;
9788 probe->dtpr_predcache = 0;
9789
9790 dtrace_sync();
9791 return (0);
9792 }
9793 }
9794
9795 static void
9796 dtrace_ecb_resize(dtrace_ecb_t *ecb)
9797 {
9798 uint32_t maxalign = sizeof (dtrace_epid_t);
9799 uint32_t align = sizeof (uint8_t), offs, diff;
9800 dtrace_action_t *act;
9801 int wastuple = 0;
9802 uint32_t aggbase = UINT32_MAX;
9803 dtrace_state_t *state = ecb->dte_state;
9804
9805 /*
9806 * If we record anything, we always record the epid. (And we always
9807 * record it first.)
9808 */
9809 offs = sizeof (dtrace_epid_t);
9810 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9811
9812 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9813 dtrace_recdesc_t *rec = &act->dta_rec;
9814
9815 if ((align = rec->dtrd_alignment) > maxalign)
9816 maxalign = align;
9817
9818 if (!wastuple && act->dta_intuple) {
9819 /*
9820 * This is the first record in a tuple. Align the
9821 * offset to be at offset 4 in an 8-byte aligned
9822 * block.
9823 */
9824 diff = offs + sizeof (dtrace_aggid_t);
9825
9826 if ((diff = (diff & (sizeof (uint64_t) - 1))))
9827 offs += sizeof (uint64_t) - diff;
9828
9829 aggbase = offs - sizeof (dtrace_aggid_t);
9830 ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9831 }
9832
9833 /*LINTED*/
9834 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9835 /*
9836 * The current offset is not properly aligned; align it.
9837 */
9838 offs += align - diff;
9839 }
9840
9841 rec->dtrd_offset = offs;
9842
9843 if (offs + rec->dtrd_size > ecb->dte_needed) {
9844 ecb->dte_needed = offs + rec->dtrd_size;
9845
9846 if (ecb->dte_needed > state->dts_needed)
9847 state->dts_needed = ecb->dte_needed;
9848 }
9849
9850 if (DTRACEACT_ISAGG(act->dta_kind)) {
9851 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9852 dtrace_action_t *first = agg->dtag_first, *prev;
9853
9854 ASSERT(rec->dtrd_size != 0 && first != NULL);
9855 ASSERT(wastuple);
9856 ASSERT(aggbase != UINT32_MAX);
9857
9858 agg->dtag_base = aggbase;
9859
9860 while ((prev = first->dta_prev) != NULL &&
9861 DTRACEACT_ISAGG(prev->dta_kind)) {
9862 agg = (dtrace_aggregation_t *)prev;
9863 first = agg->dtag_first;
9864 }
9865
9866 if (prev != NULL) {
9867 offs = prev->dta_rec.dtrd_offset +
9868 prev->dta_rec.dtrd_size;
9869 } else {
9870 offs = sizeof (dtrace_epid_t);
9871 }
9872 wastuple = 0;
9873 } else {
9874 if (!act->dta_intuple)
9875 ecb->dte_size = offs + rec->dtrd_size;
9876
9877 offs += rec->dtrd_size;
9878 }
9879
9880 wastuple = act->dta_intuple;
9881 }
9882
9883 if ((act = ecb->dte_action) != NULL &&
9884 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9885 ecb->dte_size == sizeof (dtrace_epid_t)) {
9886 /*
9887 * If the size is still sizeof (dtrace_epid_t), then all
9888 * actions store no data; set the size to 0.
9889 */
9890 ecb->dte_alignment = maxalign;
9891 ecb->dte_size = 0;
9892
9893 /*
9894 * If the needed space is still sizeof (dtrace_epid_t), then
9895 * all actions need no additional space; set the needed
9896 * size to 0.
9897 */
9898 if (ecb->dte_needed == sizeof (dtrace_epid_t))
9899 ecb->dte_needed = 0;
9900
9901 return;
9902 }
9903
9904 /*
9905 * Set our alignment, and make sure that the dte_size and dte_needed
9906 * are aligned to the size of an EPID.
9907 */
9908 ecb->dte_alignment = maxalign;
9909 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9910 ~(sizeof (dtrace_epid_t) - 1);
9911 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9912 ~(sizeof (dtrace_epid_t) - 1);
9913 ASSERT(ecb->dte_size <= ecb->dte_needed);
9914 }
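
/*
 * To make the tuple alignment above concrete: the first record of an
 * aggregation tuple is placed so that offs is congruent to 4 modulo 8.
 * The 4-byte dtrace_aggid_t then lands at aggbase = offs - 4, which is
 * 8-byte aligned, and the aggregation identifier plus the tuple data
 * pack into one naturally aligned block:
 *
 *	aggbase:	[ dtrace_aggid_t ]	(aggbase % 8 == 0)
 *	offs:		[ first tuple record ]	(offs % 8 == 4)
 */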
9915
9916 static dtrace_action_t *
9917 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9918 {
9919 dtrace_aggregation_t *agg;
9920 size_t size = sizeof (uint64_t);
9921 int ntuple = desc->dtad_ntuple;
9922 dtrace_action_t *act;
9923 dtrace_recdesc_t *frec;
9924 dtrace_aggid_t aggid;
9925 dtrace_state_t *state = ecb->dte_state;
9926 vmem_addr_t offset;
9927
9928 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9929 agg->dtag_ecb = ecb;
9930
9931 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9932
9933 switch (desc->dtad_kind) {
9934 case DTRACEAGG_MIN:
9935 agg->dtag_initial = INT64_MAX;
9936 agg->dtag_aggregate = dtrace_aggregate_min;
9937 break;
9938
9939 case DTRACEAGG_MAX:
9940 agg->dtag_initial = INT64_MIN;
9941 agg->dtag_aggregate = dtrace_aggregate_max;
9942 break;
9943
9944 case DTRACEAGG_COUNT:
9945 agg->dtag_aggregate = dtrace_aggregate_count;
9946 break;
9947
9948 case DTRACEAGG_QUANTIZE:
9949 agg->dtag_aggregate = dtrace_aggregate_quantize;
9950 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
9951 sizeof (uint64_t);
9952 break;
9953
9954 case DTRACEAGG_LQUANTIZE: {
9955 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
9956 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
9957
9958 agg->dtag_initial = desc->dtad_arg;
9959 agg->dtag_aggregate = dtrace_aggregate_lquantize;
9960
9961 if (step == 0 || levels == 0)
9962 goto err;
9963
9964 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
9965 break;
9966 }
9967
9968 case DTRACEAGG_AVG:
9969 agg->dtag_aggregate = dtrace_aggregate_avg;
9970 size = sizeof (uint64_t) * 2;
9971 break;
9972
9973 case DTRACEAGG_STDDEV:
9974 agg->dtag_aggregate = dtrace_aggregate_stddev;
9975 size = sizeof (uint64_t) * 4;
9976 break;
9977
9978 case DTRACEAGG_SUM:
9979 agg->dtag_aggregate = dtrace_aggregate_sum;
9980 break;
9981
9982 default:
9983 goto err;
9984 }
9985
9986 agg->dtag_action.dta_rec.dtrd_size = size;
9987
9988 if (ntuple == 0)
9989 goto err;
9990
9991 /*
9992 * We must make sure that we have enough actions for the n-tuple.
9993 */
9994 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
9995 if (DTRACEACT_ISAGG(act->dta_kind))
9996 break;
9997
9998 if (--ntuple == 0) {
9999 /*
10000 * This is the action with which our n-tuple begins.
10001 */
10002 agg->dtag_first = act;
10003 goto success;
10004 }
10005 }
10006
10007 /*
10008 * This n-tuple is short by ntuple elements. Return failure.
10009 */
10010 ASSERT(ntuple != 0);
10011 err:
10012 kmem_free(agg, sizeof (dtrace_aggregation_t));
10013 return (NULL);
10014
10015 success:
10016 /*
10017 * If the last action in the tuple has a size of zero, it's actually
10018 * an expression argument for the aggregating action.
10019 */
10020 ASSERT(ecb->dte_action_last != NULL);
10021 act = ecb->dte_action_last;
10022
10023 if (act->dta_kind == DTRACEACT_DIFEXPR) {
10024 ASSERT(act->dta_difo != NULL);
10025
10026 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
10027 agg->dtag_hasarg = 1;
10028 }
10029
10030 /*
10031 * We need to allocate an id for this aggregation.
10032 */
10033 if (vmem_alloc(state->dts_aggid_arena, 1, VM_BESTFIT | VM_SLEEP,
10034 &offset) != 0)
10035 ASSERT(0);
10036 aggid = (dtrace_aggid_t)(uintptr_t)offset;
10039 if (aggid - 1 >= state->dts_naggregations) {
10040 dtrace_aggregation_t **oaggs = state->dts_aggregations;
10041 dtrace_aggregation_t **aggs;
10042 int naggs = state->dts_naggregations << 1;
10043 int onaggs = state->dts_naggregations;
10044
10045 ASSERT(aggid == state->dts_naggregations + 1);
10046
10047 if (naggs == 0) {
10048 ASSERT(oaggs == NULL);
10049 naggs = 1;
10050 }
10051
10052 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
10053
10054 if (oaggs != NULL) {
10055 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
10056 kmem_free(oaggs, onaggs * sizeof (*aggs));
10057 }
10058
10059 state->dts_aggregations = aggs;
10060 state->dts_naggregations = naggs;
10061 }
10062
10063 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
10064 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
10065
10066 frec = &agg->dtag_first->dta_rec;
10067 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
10068 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
10069
10070 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
10071 ASSERT(!act->dta_intuple);
10072 act->dta_intuple = 1;
10073 }
10074
10075 return (&agg->dtag_action);
10076 }
10077
10078 static void
10079 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
10080 {
10081 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10082 dtrace_state_t *state = ecb->dte_state;
10083 dtrace_aggid_t aggid = agg->dtag_id;
10084
10085 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
10086 #if defined(sun)
10087 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
10088 #else
10089 vmem_free(state->dts_aggid_arena, (uintptr_t)aggid, 1);
10090 #endif
10091
10092 ASSERT(state->dts_aggregations[aggid - 1] == agg);
10093 state->dts_aggregations[aggid - 1] = NULL;
10094
10095 kmem_free(agg, sizeof (dtrace_aggregation_t));
10096 }
10097
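/*
 * Add the action described by the specified action description to the ECB,
 * validating its DIF (if any), sizing its data record and deriving the
 * record's alignment.  Returns 0 on success or EINVAL if the description
 * is invalid.
 */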
10098 static int
10099 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10100 {
10101 dtrace_action_t *action, *last;
10102 dtrace_difo_t *dp = desc->dtad_difo;
10103 uint32_t size = 0, align = sizeof (uint8_t), mask;
10104 uint16_t format = 0;
10105 dtrace_recdesc_t *rec;
10106 dtrace_state_t *state = ecb->dte_state;
10107 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize;
10108 uint64_t arg = desc->dtad_arg;
10109
10110 ASSERT(MUTEX_HELD(&dtrace_lock));
10111 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
10112
10113 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
10114 /*
10115 * If this is an aggregating action, there must be neither
10116 * a speculate nor a commit on the action chain.
10117 */
10118 dtrace_action_t *act;
10119
10120 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10121 if (act->dta_kind == DTRACEACT_COMMIT)
10122 return (EINVAL);
10123
10124 if (act->dta_kind == DTRACEACT_SPECULATE)
10125 return (EINVAL);
10126 }
10127
10128 action = dtrace_ecb_aggregation_create(ecb, desc);
10129
10130 if (action == NULL)
10131 return (EINVAL);
10132 } else {
10133 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10134 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10135 dp != NULL && dp->dtdo_destructive)) {
10136 state->dts_destructive = 1;
10137 }
10138
10139 switch (desc->dtad_kind) {
10140 case DTRACEACT_PRINTF:
10141 case DTRACEACT_PRINTA:
10142 case DTRACEACT_SYSTEM:
10143 case DTRACEACT_FREOPEN:
10144 /*
10145 * We know that our arg is a string -- turn it into a
10146 * format.
10147 */
10148 if (arg == 0) {
10149 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA);
10150 format = 0;
10151 } else {
10152 ASSERT(arg != 0);
10153 #if defined(sun)
10154 ASSERT(arg > KERNELBASE);
10155 #endif
10156 format = dtrace_format_add(state,
10157 (char *)(uintptr_t)arg);
10158 }
10159
10160 /*FALLTHROUGH*/
10161 case DTRACEACT_LIBACT:
10162 case DTRACEACT_DIFEXPR:
10163 if (dp == NULL)
10164 return (EINVAL);
10165
10166 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
10167 break;
10168
10169 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
10170 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10171 return (EINVAL);
10172
10173 size = opt[DTRACEOPT_STRSIZE];
10174 }
10175
10176 break;
10177
10178 case DTRACEACT_STACK:
10179 if ((nframes = arg) == 0) {
10180 nframes = opt[DTRACEOPT_STACKFRAMES];
10181 ASSERT(nframes > 0);
10182 arg = nframes;
10183 }
10184
10185 size = nframes * sizeof (pc_t);
10186 break;
10187
10188 case DTRACEACT_JSTACK:
10189 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
10190 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
10191
10192 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
10193 nframes = opt[DTRACEOPT_JSTACKFRAMES];
10194
10195 arg = DTRACE_USTACK_ARG(nframes, strsize);
10196
10197 /*FALLTHROUGH*/
10198 case DTRACEACT_USTACK:
10199 if (desc->dtad_kind != DTRACEACT_JSTACK &&
10200 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
10201 strsize = DTRACE_USTACK_STRSIZE(arg);
10202 nframes = opt[DTRACEOPT_USTACKFRAMES];
10203 ASSERT(nframes > 0);
10204 arg = DTRACE_USTACK_ARG(nframes, strsize);
10205 }
10206
10207 /*
10208 * Save a slot for the pid.
10209 */
10210 size = (nframes + 1) * sizeof (uint64_t);
10211 size += DTRACE_USTACK_STRSIZE(arg);
10212 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
10213
10214 break;
10215
10216 case DTRACEACT_SYM:
10217 case DTRACEACT_MOD:
10218 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
10219 sizeof (uint64_t)) ||
10220 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10221 return (EINVAL);
10222 break;
10223
10224 case DTRACEACT_USYM:
10225 case DTRACEACT_UMOD:
10226 case DTRACEACT_UADDR:
10227 if (dp == NULL ||
10228 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
10229 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10230 return (EINVAL);
10231
10232 /*
10233 * We have a slot for the pid, plus a slot for the
10234 * argument. To keep things simple (aligned with
10235 * bitness-neutral sizing), we store each as a 64-bit
10236 * quantity.
10237 */
10238 size = 2 * sizeof (uint64_t);
10239 break;
10240
10241 case DTRACEACT_STOP:
10242 case DTRACEACT_BREAKPOINT:
10243 case DTRACEACT_PANIC:
10244 break;
10245
10246 case DTRACEACT_CHILL:
10247 case DTRACEACT_DISCARD:
10248 case DTRACEACT_RAISE:
10249 if (dp == NULL)
10250 return (EINVAL);
10251 break;
10252
10253 case DTRACEACT_EXIT:
10254 if (dp == NULL ||
10255 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
10256 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10257 return (EINVAL);
10258 break;
10259
10260 case DTRACEACT_SPECULATE:
10261 if (ecb->dte_size > sizeof (dtrace_epid_t))
10262 return (EINVAL);
10263
10264 if (dp == NULL)
10265 return (EINVAL);
10266
10267 state->dts_speculates = 1;
10268 break;
10269
		case DTRACEACT_PRINTM:
		case DTRACEACT_PRINTT:
			if (dp == NULL)
				return (EINVAL);

			size = dp->dtdo_rtype.dtdt_size;
			break;
10277
10278 case DTRACEACT_COMMIT: {
10279 dtrace_action_t *act = ecb->dte_action;
10280
10281 for (; act != NULL; act = act->dta_next) {
10282 if (act->dta_kind == DTRACEACT_COMMIT)
10283 return (EINVAL);
10284 }
10285
10286 if (dp == NULL)
10287 return (EINVAL);
10288 break;
10289 }
10290
10291 default:
10292 return (EINVAL);
10293 }
10294
10295 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10296 /*
10297 * If this is a data-storing action or a speculate,
10298 * we must be sure that there isn't a commit on the
10299 * action chain.
10300 */
10301 dtrace_action_t *act = ecb->dte_action;
10302
10303 for (; act != NULL; act = act->dta_next) {
10304 if (act->dta_kind == DTRACEACT_COMMIT)
10305 return (EINVAL);
10306 }
10307 }
10308
10309 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10310 action->dta_rec.dtrd_size = size;
10311 }
10312
10313 action->dta_refcnt = 1;
10314 rec = &action->dta_rec;
10315 size = rec->dtrd_size;
10316
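	/*
	 * Derive the record's alignment from its size:  the largest power
	 * of two (up to the size of a uint64_t) that evenly divides the
	 * size.
	 */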
10317 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10318 if (!(size & mask)) {
10319 align = mask + 1;
10320 break;
10321 }
10322 }
10323
10324 action->dta_kind = desc->dtad_kind;
10325
10326 if ((action->dta_difo = dp) != NULL)
10327 dtrace_difo_hold(dp);
10328
10329 rec->dtrd_action = action->dta_kind;
10330 rec->dtrd_arg = arg;
10331 rec->dtrd_uarg = desc->dtad_uarg;
10332 rec->dtrd_alignment = (uint16_t)align;
10333 rec->dtrd_format = format;
10334
10335 if ((last = ecb->dte_action_last) != NULL) {
10336 ASSERT(ecb->dte_action != NULL);
10337 action->dta_prev = last;
10338 last->dta_next = action;
10339 } else {
10340 ASSERT(ecb->dte_action == NULL);
10341 ecb->dte_action = action;
10342 }
10343
10344 ecb->dte_action_last = action;
10345
10346 return (0);
10347 }
10348
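/*
 * Remove all actions from the ECB.  If the action list is shared with
 * other ECBs (that is, if dta_refcnt is greater than one), merely drop the
 * reference; otherwise, release each action's format, DIFO and -- for
 * aggregating actions -- aggregation state.
 */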
10349 static void
10350 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10351 {
10352 dtrace_action_t *act = ecb->dte_action, *next;
10353 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10354 dtrace_difo_t *dp;
10355 uint16_t format;
10356
10357 if (act != NULL && act->dta_refcnt > 1) {
10358 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10359 act->dta_refcnt--;
10360 } else {
10361 for (; act != NULL; act = next) {
10362 next = act->dta_next;
10363 ASSERT(next != NULL || act == ecb->dte_action_last);
10364 ASSERT(act->dta_refcnt == 1);
10365
10366 if ((format = act->dta_rec.dtrd_format) != 0)
10367 dtrace_format_remove(ecb->dte_state, format);
10368
10369 if ((dp = act->dta_difo) != NULL)
10370 dtrace_difo_release(dp, vstate);
10371
10372 if (DTRACEACT_ISAGG(act->dta_kind)) {
10373 dtrace_ecb_aggregation_destroy(ecb, act);
10374 } else {
10375 kmem_free(act, sizeof (dtrace_action_t));
10376 }
10377 }
10378 }
10379
10380 ecb->dte_action = NULL;
10381 ecb->dte_action_last = NULL;
10382 ecb->dte_size = sizeof (dtrace_epid_t);
10383 }
10384
10385 static void
10386 dtrace_ecb_disable(dtrace_ecb_t *ecb)
10387 {
10388 /*
10389 * We disable the ECB by removing it from its probe.
10390 */
10391 dtrace_ecb_t *pecb, *prev = NULL;
10392 dtrace_probe_t *probe = ecb->dte_probe;
10393
10394 ASSERT(MUTEX_HELD(&dtrace_lock));
10395
10396 if (probe == NULL) {
10397 /*
10398 * This is the NULL probe; there is nothing to disable.
10399 */
10400 return;
10401 }
10402
10403 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
10404 if (pecb == ecb)
10405 break;
10406 prev = pecb;
10407 }
10408
10409 ASSERT(pecb != NULL);
10410
10411 if (prev == NULL) {
10412 probe->dtpr_ecb = ecb->dte_next;
10413 } else {
10414 prev->dte_next = ecb->dte_next;
10415 }
10416
10417 if (ecb == probe->dtpr_ecb_last) {
10418 ASSERT(ecb->dte_next == NULL);
10419 probe->dtpr_ecb_last = prev;
10420 }
10421
10422 /*
10423 * The ECB has been disconnected from the probe; now sync to assure
10424 * that all CPUs have seen the change before returning.
10425 */
10426 dtrace_sync();
10427
10428 if (probe->dtpr_ecb == NULL) {
10429 /*
10430 * That was the last ECB on the probe; clear the predicate
10431 * cache ID for the probe, disable it and sync one more time
10432 * to assure that we'll never hit it again.
10433 */
10434 dtrace_provider_t *prov = probe->dtpr_provider;
10435
10436 ASSERT(ecb->dte_next == NULL);
10437 ASSERT(probe->dtpr_ecb_last == NULL);
10438 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
10439 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
10440 probe->dtpr_id, probe->dtpr_arg);
10441 dtrace_sync();
10442 } else {
10443 /*
10444 * There is at least one ECB remaining on the probe. If there
10445 * is _exactly_ one, set the probe's predicate cache ID to be
10446 * the predicate cache ID of the remaining ECB.
10447 */
10448 ASSERT(probe->dtpr_ecb_last != NULL);
10449 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
10450
10451 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
10452 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
10453
10454 ASSERT(probe->dtpr_ecb->dte_next == NULL);
10455
10456 if (p != NULL)
10457 probe->dtpr_predcache = p->dtp_cacheid;
10458 }
10459
10460 ecb->dte_next = NULL;
10461 }
10462 }
10463
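/*
 * Destroy a disconnected ECB:  release its predicate and actions, clear
 * its EPID slot in the consumer state, and free it.
 */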
10464 static void
10465 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
10466 {
10467 dtrace_state_t *state = ecb->dte_state;
10468 dtrace_vstate_t *vstate = &state->dts_vstate;
10469 dtrace_predicate_t *pred;
10470 dtrace_epid_t epid = ecb->dte_epid;
10471
10472 ASSERT(MUTEX_HELD(&dtrace_lock));
10473 ASSERT(ecb->dte_next == NULL);
10474 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
10475
10476 if ((pred = ecb->dte_predicate) != NULL)
10477 dtrace_predicate_release(pred, vstate);
10478
10479 dtrace_ecb_action_remove(ecb);
10480
10481 ASSERT(state->dts_ecbs[epid - 1] == ecb);
10482 state->dts_ecbs[epid - 1] = NULL;
10483
10484 kmem_free(ecb, sizeof (dtrace_ecb_t));
10485 }
10486
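/*
 * Create an ECB on the specified probe from the enabling's current ECB
 * description, adding any implicit predicate conditions required by the
 * consumer's privileges.  If an identical ECB was just created, its action
 * list is shared rather than rebuilt.
 */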
10487 static dtrace_ecb_t *
10488 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
10489 dtrace_enabling_t *enab)
10490 {
10491 dtrace_ecb_t *ecb;
10492 dtrace_predicate_t *pred;
10493 dtrace_actdesc_t *act;
10494 dtrace_provider_t *prov;
10495 dtrace_ecbdesc_t *desc = enab->dten_current;
10496
10497 ASSERT(MUTEX_HELD(&dtrace_lock));
10498 ASSERT(state != NULL);
10499
10500 ecb = dtrace_ecb_add(state, probe);
10501 ecb->dte_uarg = desc->dted_uarg;
10502
10503 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
10504 dtrace_predicate_hold(pred);
10505 ecb->dte_predicate = pred;
10506 }
10507
10508 if (probe != NULL) {
10509 /*
10510 * If the provider shows more leg than the consumer is old
10511 * enough to see, we need to enable the appropriate implicit
10512 * predicate bits to prevent the ecb from activating at
10513 * revealing times.
10514 *
10515 * Providers specifying DTRACE_PRIV_USER at register time
10516 * are stating that they need the /proc-style privilege
10517 * model to be enforced, and this is what DTRACE_COND_OWNER
10518 * and DTRACE_COND_ZONEOWNER will then do at probe time.
10519 */
10520 prov = probe->dtpr_provider;
10521 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
10522 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10523 ecb->dte_cond |= DTRACE_COND_OWNER;
10524
10525 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
10526 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10527 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
10528
10529 /*
10530 * If the provider shows us kernel innards and the user
10531 * is lacking sufficient privilege, enable the
10532 * DTRACE_COND_USERMODE implicit predicate.
10533 */
10534 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
10535 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
10536 ecb->dte_cond |= DTRACE_COND_USERMODE;
10537 }
10538
10539 if (dtrace_ecb_create_cache != NULL) {
10540 /*
10541 * If we have a cached ecb, we'll use its action list instead
10542 * of creating our own (saving both time and space).
10543 */
10544 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
10545 dtrace_action_t *xact = cached->dte_action;
10546
10547 if (xact != NULL) {
10548 ASSERT(xact->dta_refcnt > 0);
10549 xact->dta_refcnt++;
10550 ecb->dte_action = xact;
10551 ecb->dte_action_last = cached->dte_action_last;
10552 ecb->dte_needed = cached->dte_needed;
10553 ecb->dte_size = cached->dte_size;
10554 ecb->dte_alignment = cached->dte_alignment;
10555 }
10556
10557 return (ecb);
10558 }
10559
10560 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
10561 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
10562 dtrace_ecb_destroy(ecb);
10563 return (NULL);
10564 }
10565 }
10566
10567 dtrace_ecb_resize(ecb);
10568
10569 return (dtrace_ecb_create_cache = ecb);
10570 }
10571
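/*
 * Matching callback:  create and enable an ECB on the specified probe,
 * skipping probes created in generations that this enabling has already
 * covered.
 */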
10572 static int
10573 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
10574 {
10575 dtrace_ecb_t *ecb;
10576 dtrace_enabling_t *enab = arg;
10577 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
10578
10579 ASSERT(state != NULL);
10580
10581 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
10582 /*
10583 * This probe was created in a generation for which this
10584 * enabling has previously created ECBs; we don't want to
10585 * enable it again, so just kick out.
10586 */
10587 return (DTRACE_MATCH_NEXT);
10588 }
10589
10590 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
10591 return (DTRACE_MATCH_DONE);
10592
10593 if (dtrace_ecb_enable(ecb) < 0)
10594 return (DTRACE_MATCH_FAIL);
10595
10596 return (DTRACE_MATCH_NEXT);
10597 }
10598
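/*
 * Look up the ECB with the specified (one-based) EPID, returning NULL if
 * the ID is out of range.
 */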
10599 static dtrace_ecb_t *
10600 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
10601 {
10602 dtrace_ecb_t *ecb;
10603
10604 ASSERT(MUTEX_HELD(&dtrace_lock));
10605
10606 if (id == 0 || id > state->dts_necbs)
10607 return (NULL);
10608
10609 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
10610 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
10611
10612 return (state->dts_ecbs[id - 1]);
10613 }
10614
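/*
 * As above, but for aggregations:  look up the aggregation with the
 * specified (one-based) aggregation ID.
 */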
10615 static dtrace_aggregation_t *
10616 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
10617 {
10618 dtrace_aggregation_t *agg;
10619
10620 ASSERT(MUTEX_HELD(&dtrace_lock));
10621
10622 if (id == 0 || id > state->dts_naggregations)
10623 return (NULL);
10624
10625 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
10626 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
10627 agg->dtag_id == id);
10628
10629 return (state->dts_aggregations[id - 1]);
10630 }
10631
10632 /*
10633 * DTrace Buffer Functions
10634 *
10635 * The following functions manipulate DTrace buffers. Most of these functions
10636 * are called in the context of establishing or processing consumer state;
10637 * exceptions are explicitly noted.
10638 */
10639
10640 /*
10641 * Note: called from cross call context. This function switches the two
10642 * buffers on a given CPU. The atomicity of this operation is assured by
10643 * disabling interrupts while the actual switch takes place; the disabling of
10644 * interrupts serializes the execution with any execution of dtrace_probe() on
10645 * the same CPU.
10646 */
10647 static void
10648 dtrace_buffer_switch(dtrace_buffer_t *buf)
10649 {
10650 caddr_t tomax = buf->dtb_tomax;
10651 caddr_t xamot = buf->dtb_xamot;
10652 dtrace_icookie_t cookie;
10653
10654 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10655 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
10656
10657 cookie = dtrace_interrupt_disable();
10658 buf->dtb_tomax = xamot;
10659 buf->dtb_xamot = tomax;
10660 buf->dtb_xamot_drops = buf->dtb_drops;
10661 buf->dtb_xamot_offset = buf->dtb_offset;
10662 buf->dtb_xamot_errors = buf->dtb_errors;
10663 buf->dtb_xamot_flags = buf->dtb_flags;
10664 buf->dtb_offset = 0;
10665 buf->dtb_drops = 0;
10666 buf->dtb_errors = 0;
10667 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
10668 dtrace_interrupt_enable(cookie);
10669 }
10670
10671 /*
10672 * Note: called from cross call context. This function activates a buffer
10673 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
10674 * is guaranteed by the disabling of interrupts.
10675 */
10676 static void
10677 dtrace_buffer_activate(dtrace_state_t *state)
10678 {
10679 dtrace_buffer_t *buf;
10680 dtrace_icookie_t cookie = dtrace_interrupt_disable();
10681
10682 buf = &state->dts_buffer[curcpu_id];
10683
10684 if (buf->dtb_tomax != NULL) {
10685 /*
10686 * We might like to assert that the buffer is marked inactive,
10687 * but this isn't necessarily true: the buffer for the CPU
10688 * that processes the BEGIN probe has its buffer activated
		 * manually.  In this case, we take the (harmless) action of
		 * re-clearing the INACTIVE bit.
10691 */
10692 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
10693 }
10694
10695 dtrace_interrupt_enable(cookie);
10696 }
10697
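/*
 * Allocate a pair of switchable per-CPU buffers (or just one buffer if
 * DTRACEBUF_NOSWITCH is set) of the specified size, either for the
 * specified CPU or -- if DTRACE_CPUALL -- for every CPU.  If any
 * allocation fails, the buffers allocated for the matching CPUs are freed
 * and ENOMEM is returned.
 */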
10698 static int
10699 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
10700 processorid_t cpu)
10701 {
10702 #if defined(sun)
10703 cpu_t *cp;
10704 #else
10705 CPU_INFO_ITERATOR cpuind;
10706 struct cpu_info *cinfo;
10707 #endif
10708 dtrace_buffer_t *buf;
10709
10710 #if defined(sun)
10711 ASSERT(MUTEX_HELD(&cpu_lock));
10712 ASSERT(MUTEX_HELD(&dtrace_lock));
10713
10714 if (size > dtrace_nonroot_maxsize &&
10715 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
10716 return (EFBIG);
10717
10718 cp = cpu_list;
10719
10720 do {
10721 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10722 continue;
10723
10724 buf = &bufs[cp->cpu_id];
10725
10726 /*
10727 * If there is already a buffer allocated for this CPU, it
		 * is only possible that this is a DR event.  In this case,
		 * the buffer size must match our specified size.
		 */
10730 if (buf->dtb_tomax != NULL) {
10731 ASSERT(buf->dtb_size == size);
10732 continue;
10733 }
10734
10735 ASSERT(buf->dtb_xamot == NULL);
10736
10737 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10738 goto err;
10739
10740 buf->dtb_size = size;
10741 buf->dtb_flags = flags;
10742 buf->dtb_offset = 0;
10743 buf->dtb_drops = 0;
10744
10745 if (flags & DTRACEBUF_NOSWITCH)
10746 continue;
10747
10748 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10749 goto err;
10750 } while ((cp = cp->cpu_next) != cpu_list);
10751
10752 return (0);
10753
10754 err:
10755 cp = cpu_list;
10756
10757 do {
10758 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10759 continue;
10760
10761 buf = &bufs[cp->cpu_id];
10762
10763 if (buf->dtb_xamot != NULL) {
10764 ASSERT(buf->dtb_tomax != NULL);
10765 ASSERT(buf->dtb_size == size);
10766 kmem_free(buf->dtb_xamot, size);
10767 }
10768
10769 if (buf->dtb_tomax != NULL) {
10770 ASSERT(buf->dtb_size == size);
10771 kmem_free(buf->dtb_tomax, size);
10772 }
10773
10774 buf->dtb_tomax = NULL;
10775 buf->dtb_xamot = NULL;
10776 buf->dtb_size = 0;
10777 } while ((cp = cp->cpu_next) != cpu_list);
10778
10779 return (ENOMEM);
10780 #else
10781
10782 #if defined(__amd64__)
10783 /*
10784 * FreeBSD isn't good at limiting the amount of memory we
10785 * ask to malloc, so let's place a limit here before trying
10786 * to do something that might well end in tears at bedtime.
10787 */
10788 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1)))
		return (ENOMEM);
10790 #endif
10791
10792 ASSERT(MUTEX_HELD(&dtrace_lock));
10793 for (CPU_INFO_FOREACH(cpuind, cinfo)) {
10794 if (cpu != DTRACE_CPUALL && cpu != cpu_index(cinfo))
10795 continue;
10796
10797 buf = &bufs[cpu_index(cinfo)];
10798
10799 /*
10800 * If there is already a buffer allocated for this CPU, it
10801 * is only possible that this is a DR event. In this case,
10802 * the buffer size must match our specified size.
10803 */
10804 if (buf->dtb_tomax != NULL) {
10805 ASSERT(buf->dtb_size == size);
10806 continue;
10807 }
10808
10809 ASSERT(buf->dtb_xamot == NULL);
10810
10811 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10812 goto err;
10813
10814 buf->dtb_size = size;
10815 buf->dtb_flags = flags;
10816 buf->dtb_offset = 0;
10817 buf->dtb_drops = 0;
10818
10819 if (flags & DTRACEBUF_NOSWITCH)
10820 continue;
10821
10822 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10823 goto err;
10824 }
10825
10826 return (0);
10827
10828 err:
10829 /*
10830 * Error allocating memory, so free the buffers that were
10831 * allocated before the failed allocation.
10832 */
10833 for (CPU_INFO_FOREACH(cpuind, cinfo)) {
10834 if (cpu != DTRACE_CPUALL && cpu != cpu_index(cinfo))
10835 continue;
10836
10837 buf = &bufs[cpu_index(cinfo)];
10838
10839 if (buf->dtb_xamot != NULL) {
10840 ASSERT(buf->dtb_tomax != NULL);
10841 ASSERT(buf->dtb_size == size);
10842 kmem_free(buf->dtb_xamot, size);
10843 }
10844
10845 if (buf->dtb_tomax != NULL) {
10846 ASSERT(buf->dtb_size == size);
10847 kmem_free(buf->dtb_tomax, size);
10848 }
10849
10850 buf->dtb_tomax = NULL;
10851 buf->dtb_xamot = NULL;
10852 buf->dtb_size = 0;
10853
10854 }
10855
10856 return (ENOMEM);
10857 #endif
10858 }
10859
10860 /*
10861 * Note: called from probe context. This function just increments the drop
10862 * count on a buffer. It has been made a function to allow for the
10863 * possibility of understanding the source of mysterious drop counts. (A
10864 * problem for which one may be particularly disappointed that DTrace cannot
10865 * be used to understand DTrace.)
10866 */
10867 static void
10868 dtrace_buffer_drop(dtrace_buffer_t *buf)
10869 {
10870 buf->dtb_drops++;
10871 }
10872
10873 /*
10874 * Note: called from probe context. This function is called to reserve space
10875 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
10876 * mstate. Returns the new offset in the buffer, or a negative value if an
10877 * error has occurred.
10878 */
10879 static intptr_t
10880 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
10881 dtrace_state_t *state, dtrace_mstate_t *mstate)
10882 {
10883 intptr_t offs = buf->dtb_offset, soffs;
10884 intptr_t woffs;
10885 caddr_t tomax;
10886 size_t total;
10887
10888 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
10889 return (-1);
10890
10891 if ((tomax = buf->dtb_tomax) == NULL) {
10892 dtrace_buffer_drop(buf);
10893 return (-1);
10894 }
10895
10896 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
10897 while (offs & (align - 1)) {
10898 /*
10899 * Assert that our alignment is off by a number which
10900 * is itself sizeof (uint32_t) aligned.
10901 */
10902 ASSERT(!((align - (offs & (align - 1))) &
10903 (sizeof (uint32_t) - 1)));
10904 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10905 offs += sizeof (uint32_t);
10906 }
10907
10908 if ((soffs = offs + needed) > buf->dtb_size) {
10909 dtrace_buffer_drop(buf);
10910 return (-1);
10911 }
10912
10913 if (mstate == NULL)
10914 return (offs);
10915
10916 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
10917 mstate->dtms_scratch_size = buf->dtb_size - soffs;
10918 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10919
10920 return (offs);
10921 }
10922
10923 if (buf->dtb_flags & DTRACEBUF_FILL) {
10924 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
10925 (buf->dtb_flags & DTRACEBUF_FULL))
10926 return (-1);
10927 goto out;
10928 }
10929
10930 total = needed + (offs & (align - 1));
10931
10932 /*
10933 * For a ring buffer, life is quite a bit more complicated. Before
10934 * we can store any padding, we need to adjust our wrapping offset.
10935 * (If we've never before wrapped or we're not about to, no adjustment
10936 * is required.)
10937 */
10938 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
10939 offs + total > buf->dtb_size) {
10940 woffs = buf->dtb_xamot_offset;
10941
10942 if (offs + total > buf->dtb_size) {
10943 /*
10944 * We can't fit in the end of the buffer. First, a
10945 * sanity check that we can fit in the buffer at all.
10946 */
10947 if (total > buf->dtb_size) {
10948 dtrace_buffer_drop(buf);
10949 return (-1);
10950 }
10951
10952 /*
10953 * We're going to be storing at the top of the buffer,
10954 * so now we need to deal with the wrapped offset. We
10955 * only reset our wrapped offset to 0 if it is
10956 * currently greater than the current offset. If it
10957 * is less than the current offset, it is because a
10958 * previous allocation induced a wrap -- but the
10959 * allocation didn't subsequently take the space due
10960 * to an error or false predicate evaluation. In this
10961 * case, we'll just leave the wrapped offset alone: if
10962 * the wrapped offset hasn't been advanced far enough
10963 * for this allocation, it will be adjusted in the
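		/*
		 * One uint64_t to hold the encoded lquantize() argument,
		 * plus levels + 2 buckets (the extra two being the
		 * underflow and overflow buckets).
		 */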
10964 * lower loop.
10965 */
10966 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
10967 if (woffs >= offs)
10968 woffs = 0;
10969 } else {
10970 woffs = 0;
10971 }
10972
10973 /*
10974 * Now we know that we're going to be storing to the
10975 * top of the buffer and that there is room for us
10976 * there. We need to clear the buffer from the current
10977 * offset to the end (there may be old gunk there).
10978 */
10979 while (offs < buf->dtb_size)
10980 tomax[offs++] = 0;
10981
10982 /*
10983 * We need to set our offset to zero. And because we
10984 * are wrapping, we need to set the bit indicating as
10985 * much. We can also adjust our needed space back
10986 * down to the space required by the ECB -- we know
10987 * that the top of the buffer is aligned.
10988 */
10989 offs = 0;
10990 total = needed;
10991 buf->dtb_flags |= DTRACEBUF_WRAPPED;
10992 } else {
10993 /*
10994 * There is room for us in the buffer, so we simply
10995 * need to check the wrapped offset.
10996 */
10997 if (woffs < offs) {
10998 /*
10999 * The wrapped offset is less than the offset.
11000 * This can happen if we allocated buffer space
11001 * that induced a wrap, but then we didn't
11002 * subsequently take the space due to an error
11003 * or false predicate evaluation. This is
11004 * okay; we know that _this_ allocation isn't
11005 * going to induce a wrap. We still can't
11006 * reset the wrapped offset to be zero,
11007 * however: the space may have been trashed in
11008 * the previous failed probe attempt. But at
11009 * least the wrapped offset doesn't need to
11010 * be adjusted at all...
11011 */
11012 goto out;
11013 }
11014 }
11015
11016 while (offs + total > woffs) {
11017 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
11018 size_t size;
11019
11020 if (epid == DTRACE_EPIDNONE) {
11021 size = sizeof (uint32_t);
11022 } else {
11023 ASSERT(epid <= state->dts_necbs);
11024 ASSERT(state->dts_ecbs[epid - 1] != NULL);
11025
11026 size = state->dts_ecbs[epid - 1]->dte_size;
11027 }
11028
11029 ASSERT(woffs + size <= buf->dtb_size);
11030 ASSERT(size != 0);
11031
11032 if (woffs + size == buf->dtb_size) {
11033 /*
11034 * We've reached the end of the buffer; we want
11035 * to set the wrapped offset to 0 and break
11036 * out. However, if the offs is 0, then we're
11037 * in a strange edge-condition: the amount of
11038 * space that we want to reserve plus the size
11039 * of the record that we're overwriting is
11040 * greater than the size of the buffer. This
11041 * is problematic because if we reserve the
11042 * space but subsequently don't consume it (due
11043 * to a failed predicate or error) the wrapped
11044 * offset will be 0 -- yet the EPID at offset 0
11045 * will not be committed. This situation is
11046 * relatively easy to deal with: if we're in
11047 * this case, the buffer is indistinguishable
11048 * from one that hasn't wrapped; we need only
11049 * finish the job by clearing the wrapped bit,
11050 * explicitly setting the offset to be 0, and
11051 * zero'ing out the old data in the buffer.
11052 */
11053 if (offs == 0) {
11054 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
11055 buf->dtb_offset = 0;
11056 woffs = total;
11057
11058 while (woffs < buf->dtb_size)
11059 tomax[woffs++] = 0;
11060 }
11061
11062 woffs = 0;
11063 break;
11064 }
11065
11066 woffs += size;
11067 }
11068
11069 /*
11070 * We have a wrapped offset. It may be that the wrapped offset
11071 * has become zero -- that's okay.
11072 */
11073 buf->dtb_xamot_offset = woffs;
11074 }
11075
11076 out:
11077 /*
11078 * Now we can plow the buffer with any necessary padding.
11079 */
11080 while (offs & (align - 1)) {
11081 /*
11082 * Assert that our alignment is off by a number which
11083 * is itself sizeof (uint32_t) aligned.
11084 */
11085 ASSERT(!((align - (offs & (align - 1))) &
11086 (sizeof (uint32_t) - 1)));
11087 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11088 offs += sizeof (uint32_t);
11089 }
11090
11091 if (buf->dtb_flags & DTRACEBUF_FILL) {
11092 if (offs + needed > buf->dtb_size - state->dts_reserve) {
11093 buf->dtb_flags |= DTRACEBUF_FULL;
11094 return (-1);
11095 }
11096 }
11097
11098 if (mstate == NULL)
11099 return (offs);
11100
11101 /*
11102 * For ring buffers and fill buffers, the scratch space is always
11103 * the inactive buffer.
11104 */
11105 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
11106 mstate->dtms_scratch_size = buf->dtb_size;
11107 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11108
11109 return (offs);
11110 }
11111
11112 static void
11113 dtrace_buffer_polish(dtrace_buffer_t *buf)
11114 {
11115 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
11116 ASSERT(MUTEX_HELD(&dtrace_lock));
11117
11118 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
11119 return;
11120
11121 /*
11122 * We need to polish the ring buffer. There are three cases:
11123 *
11124 * - The first (and presumably most common) is that there is no gap
11125 * between the buffer offset and the wrapped offset. In this case,
11126 * there is nothing in the buffer that isn't valid data; we can
11127 * mark the buffer as polished and return.
11128 *
11129 * - The second (less common than the first but still more common
11130 * than the third) is that there is a gap between the buffer offset
11131 * and the wrapped offset, and the wrapped offset is larger than the
11132 * buffer offset. This can happen because of an alignment issue, or
11133 * can happen because of a call to dtrace_buffer_reserve() that
11134 * didn't subsequently consume the buffer space. In this case,
11135 * we need to zero the data from the buffer offset to the wrapped
11136 * offset.
11137 *
11138 * - The third (and least common) is that there is a gap between the
11139 * buffer offset and the wrapped offset, but the wrapped offset is
11140 * _less_ than the buffer offset. This can only happen because a
11141 * call to dtrace_buffer_reserve() induced a wrap, but the space
11142 * was not subsequently consumed. In this case, we need to zero the
11143 * space from the offset to the end of the buffer _and_ from the
11144 * top of the buffer to the wrapped offset.
11145 */
11146 if (buf->dtb_offset < buf->dtb_xamot_offset) {
11147 bzero(buf->dtb_tomax + buf->dtb_offset,
11148 buf->dtb_xamot_offset - buf->dtb_offset);
11149 }
11150
11151 if (buf->dtb_offset > buf->dtb_xamot_offset) {
11152 bzero(buf->dtb_tomax + buf->dtb_offset,
11153 buf->dtb_size - buf->dtb_offset);
11154 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
11155 }
11156 }
11157
11158 static void
11159 dtrace_buffer_free(dtrace_buffer_t *bufs)
11160 {
11161 int i;
11162
11163 for (i = 0; i < NCPU; i++) {
11164 dtrace_buffer_t *buf = &bufs[i];
11165
11166 if (buf->dtb_tomax == NULL) {
11167 ASSERT(buf->dtb_xamot == NULL);
11168 ASSERT(buf->dtb_size == 0);
11169 continue;
11170 }
11171
11172 if (buf->dtb_xamot != NULL) {
11173 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11174 kmem_free(buf->dtb_xamot, buf->dtb_size);
11175 }
11176
11177 kmem_free(buf->dtb_tomax, buf->dtb_size);
11178 buf->dtb_size = 0;
11179 buf->dtb_tomax = NULL;
11180 buf->dtb_xamot = NULL;
11181 }
11182 }
11183
11184 /*
11185 * DTrace Enabling Functions
11186 */
11187 static dtrace_enabling_t *
11188 dtrace_enabling_create(dtrace_vstate_t *vstate)
11189 {
11190 dtrace_enabling_t *enab;
11191
11192 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11193 enab->dten_vstate = vstate;
11194
11195 return (enab);
11196 }
11197
11198 static void
11199 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11200 {
11201 dtrace_ecbdesc_t **ndesc;
11202 size_t osize, nsize;
11203
11204 /*
11205 * We can't add to enablings after we've enabled them, or after we've
11206 * retained them.
11207 */
11208 ASSERT(enab->dten_probegen == 0);
11209 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11210
11211 if (enab->dten_ndesc < enab->dten_maxdesc) {
11212 enab->dten_desc[enab->dten_ndesc++] = ecb;
11213 return;
11214 }
11215
	osize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *);
11217
11218 if (enab->dten_maxdesc == 0) {
11219 enab->dten_maxdesc = 1;
11220 } else {
11221 enab->dten_maxdesc <<= 1;
11222 }
11223
11224 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
11225
	nsize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *);
11227 ndesc = kmem_zalloc(nsize, KM_SLEEP);
11228 bcopy(enab->dten_desc, ndesc, osize);
11229 if (enab->dten_desc != NULL)
11230 kmem_free(enab->dten_desc, osize);
11231
11232 enab->dten_desc = ndesc;
11233 enab->dten_desc[enab->dten_ndesc++] = ecb;
11234 }
11235
11236 static void
11237 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
11238 dtrace_probedesc_t *pd)
11239 {
11240 dtrace_ecbdesc_t *new;
11241 dtrace_predicate_t *pred;
11242 dtrace_actdesc_t *act;
11243
11244 /*
11245 * We're going to create a new ECB description that matches the
11246 * specified ECB in every way, but has the specified probe description.
11247 */
11248 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11249
11250 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
11251 dtrace_predicate_hold(pred);
11252
11253 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
11254 dtrace_actdesc_hold(act);
11255
11256 new->dted_action = ecb->dted_action;
11257 new->dted_pred = ecb->dted_pred;
11258 new->dted_probe = *pd;
11259 new->dted_uarg = ecb->dted_uarg;
11260
11261 dtrace_enabling_add(enab, new);
11262 }
11263
11264 static void
11265 dtrace_enabling_dump(dtrace_enabling_t *enab)
11266 {
11267 int i;
11268
11269 for (i = 0; i < enab->dten_ndesc; i++) {
11270 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
11271
11272 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
11273 desc->dtpd_provider, desc->dtpd_mod,
11274 desc->dtpd_func, desc->dtpd_name);
11275 }
11276 }
11277
11278 static void
11279 dtrace_enabling_destroy(dtrace_enabling_t *enab)
11280 {
11281 int i;
11282 dtrace_ecbdesc_t *ep;
11283 dtrace_vstate_t *vstate = enab->dten_vstate;
11284
11285 ASSERT(MUTEX_HELD(&dtrace_lock));
11286
11287 for (i = 0; i < enab->dten_ndesc; i++) {
11288 dtrace_actdesc_t *act, *next;
11289 dtrace_predicate_t *pred;
11290
11291 ep = enab->dten_desc[i];
11292
11293 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
11294 dtrace_predicate_release(pred, vstate);
11295
11296 for (act = ep->dted_action; act != NULL; act = next) {
11297 next = act->dtad_next;
11298 dtrace_actdesc_release(act, vstate);
11299 }
11300
11301 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11302 }
11303
11304 if (enab->dten_desc != NULL)
		kmem_free(enab->dten_desc,
		    enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *));
11307
11308 /*
11309 * If this was a retained enabling, decrement the dts_nretained count
11310 * and take it off of the dtrace_retained list.
11311 */
11312 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
11313 dtrace_retained == enab) {
11314 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11315 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
11316 enab->dten_vstate->dtvs_state->dts_nretained--;
11317 }
11318
11319 if (enab->dten_prev == NULL) {
11320 if (dtrace_retained == enab) {
11321 dtrace_retained = enab->dten_next;
11322
11323 if (dtrace_retained != NULL)
11324 dtrace_retained->dten_prev = NULL;
11325 }
11326 } else {
11327 ASSERT(enab != dtrace_retained);
11328 ASSERT(dtrace_retained != NULL);
11329 enab->dten_prev->dten_next = enab->dten_next;
11330 }
11331
11332 if (enab->dten_next != NULL) {
11333 ASSERT(dtrace_retained != NULL);
11334 enab->dten_next->dten_prev = enab->dten_prev;
11335 }
11336
11337 kmem_free(enab, sizeof (dtrace_enabling_t));
11338 }
11339
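/*
 * Place the enabling on the global list of retained enablings, allowing it
 * to be matched against probes that are created later.  Each state may
 * retain at most dtrace_retain_max enablings.
 */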
11340 static int
11341 dtrace_enabling_retain(dtrace_enabling_t *enab)
11342 {
11343 dtrace_state_t *state;
11344
11345 ASSERT(MUTEX_HELD(&dtrace_lock));
11346 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11347 ASSERT(enab->dten_vstate != NULL);
11348
11349 state = enab->dten_vstate->dtvs_state;
11350 ASSERT(state != NULL);
11351
11352 /*
11353 * We only allow each state to retain dtrace_retain_max enablings.
11354 */
11355 if (state->dts_nretained >= dtrace_retain_max)
11356 return (ENOSPC);
11357
11358 state->dts_nretained++;
11359
11360 if (dtrace_retained == NULL) {
11361 dtrace_retained = enab;
11362 return (0);
11363 }
11364
11365 enab->dten_next = dtrace_retained;
11366 dtrace_retained->dten_prev = enab;
11367 dtrace_retained = enab;
11368
11369 return (0);
11370 }
11371
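/*
 * For each retained enabling belonging to the specified state, copy every
 * ECB description whose probe description exactly matches 'match' into a
 * new enabling, giving each copy the probe description 'create'; the new
 * enabling is then itself retained.  Returns ENOENT if nothing matched.
 */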
11372 static int
11373 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
11374 dtrace_probedesc_t *create)
11375 {
11376 dtrace_enabling_t *new, *enab;
11377 int found = 0, err = ENOENT;
11378
11379 ASSERT(MUTEX_HELD(&dtrace_lock));
11380 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
11381 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
11382 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
11383 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
11384
11385 new = dtrace_enabling_create(&state->dts_vstate);
11386
11387 /*
11388 * Iterate over all retained enablings, looking for enablings that
11389 * match the specified state.
11390 */
11391 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11392 int i;
11393
11394 /*
11395 * dtvs_state can only be NULL for helper enablings -- and
11396 * helper enablings can't be retained.
11397 */
11398 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11399
11400 if (enab->dten_vstate->dtvs_state != state)
11401 continue;
11402
11403 /*
11404 * Now iterate over each probe description; we're looking for
11405 * an exact match to the specified probe description.
11406 */
11407 for (i = 0; i < enab->dten_ndesc; i++) {
11408 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11409 dtrace_probedesc_t *pd = &ep->dted_probe;
11410
11411 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
11412 continue;
11413
11414 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
11415 continue;
11416
11417 if (strcmp(pd->dtpd_func, match->dtpd_func))
11418 continue;
11419
11420 if (strcmp(pd->dtpd_name, match->dtpd_name))
11421 continue;
11422
11423 /*
11424 * We have a winning probe! Add it to our growing
11425 * enabling.
11426 */
11427 found = 1;
11428 dtrace_enabling_addlike(new, ep, create);
11429 }
11430 }
11431
11432 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
11433 dtrace_enabling_destroy(new);
11434 return (err);
11435 }
11436
11437 return (0);
11438 }
11439
11440 static void
11441 dtrace_enabling_retract(dtrace_state_t *state)
11442 {
11443 dtrace_enabling_t *enab, *next;
11444
11445 ASSERT(MUTEX_HELD(&dtrace_lock));
11446
11447 /*
11448 * Iterate over all retained enablings, destroy the enablings retained
11449 * for the specified state.
11450 */
11451 for (enab = dtrace_retained; enab != NULL; enab = next) {
11452 next = enab->dten_next;
11453
11454 /*
11455 * dtvs_state can only be NULL for helper enablings -- and
11456 * helper enablings can't be retained.
11457 */
11458 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11459
11460 if (enab->dten_vstate->dtvs_state == state) {
11461 ASSERT(state->dts_nretained > 0);
11462 dtrace_enabling_destroy(enab);
11463 }
11464 }
11465
11466 ASSERT(state->dts_nretained == 0);
11467 }
11468
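/*
 * Match each ECB description in the enabling against its probe
 * description, creating and enabling ECBs on the matched probes.  The
 * total number of probes matched is returned via nmatched (if non-NULL).
 */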
11469 static int
11470 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
11471 {
11472 int i = 0;
11473 int total_matched = 0, matched = 0;
11474
11475 ASSERT(MUTEX_HELD(&cpu_lock));
11476 ASSERT(MUTEX_HELD(&dtrace_lock));
11477
11478 for (i = 0; i < enab->dten_ndesc; i++) {
11479 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11480
11481 enab->dten_current = ep;
11482 enab->dten_error = 0;
11483
11484 /*
11485 * If a provider failed to enable a probe then get out and
11486 * let the consumer know we failed.
11487 */
11488 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
11489 return (EBUSY);
11490
11491 total_matched += matched;
11492
11493 if (enab->dten_error != 0) {
11494 /*
11495 * If we get an error half-way through enabling the
11496 * probes, we kick out -- perhaps with some number of
11497 * them enabled. Leaving enabled probes enabled may
11498 * be slightly confusing for user-level, but we expect
11499 * that no one will attempt to actually drive on in
11500 * the face of such errors. If this is an anonymous
11501 * enabling (indicated with a NULL nmatched pointer),
11502 * we cmn_err() a message. We aren't expecting to
			 * get such an error -- to the extent that it can
			 * exist at all, it would be the result of corrupted
			 * DOF in the driver properties.
11506 */
11507 if (nmatched == NULL) {
11508 cmn_err(CE_WARN, "dtrace_enabling_match() "
11509 "error on %p: %d", (void *)ep,
11510 enab->dten_error);
11511 }
11512
11513 return (enab->dten_error);
11514 }
11515 }
11516
11517 enab->dten_probegen = dtrace_probegen;
11518 if (nmatched != NULL)
11519 *nmatched = total_matched;
11520
11521 return (0);
11522 }
11523
11524 static void
11525 dtrace_enabling_matchall(void)
11526 {
11527 dtrace_enabling_t *enab;
11528
11529 mutex_enter(&cpu_lock);
11530 mutex_enter(&dtrace_lock);
11531
11532 /*
11533 * Iterate over all retained enablings to see if any probes match
11534 * against them. We only perform this operation on enablings for which
11535 * we have sufficient permissions by virtue of being in the global zone
11536 * or in the same zone as the DTrace client. Because we can be called
11537 * after dtrace_detach() has been called, we cannot assert that there
11538 * are retained enablings. We can safely load from dtrace_retained,
11539 * however: the taskq_destroy() at the end of dtrace_detach() will
11540 * block pending our completion.
11541 */
11542 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11543 #if defined(sun)
11544 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
11545
11546 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr))
11547 #endif
11548 (void) dtrace_enabling_match(enab, NULL);
11549 }
11550
11551 mutex_exit(&dtrace_lock);
11552 mutex_exit(&cpu_lock);
11553 }
11554
11555 /*
11556 * If an enabling is to be enabled without having matched probes (that is, if
11557 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
11558 * enabling must be _primed_ by creating an ECB for every ECB description.
11559 * This must be done to assure that we know the number of speculations, the
11560 * number of aggregations, the minimum buffer size needed, etc. before we
11561 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
 * enabling any probes, we create ECBs for every ECB description, but with a
11563 * NULL probe -- which is exactly what this function does.
11564 */
11565 static void
11566 dtrace_enabling_prime(dtrace_state_t *state)
11567 {
11568 dtrace_enabling_t *enab;
11569 int i;
11570
11571 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11572 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11573
11574 if (enab->dten_vstate->dtvs_state != state)
11575 continue;
11576
11577 /*
11578 * We don't want to prime an enabling more than once, lest
11579 * we allow a malicious user to induce resource exhaustion.
11580 * (The ECBs that result from priming an enabling aren't
11581 * leaked -- but they also aren't deallocated until the
11582 * consumer state is destroyed.)
11583 */
11584 if (enab->dten_primed)
11585 continue;
11586
11587 for (i = 0; i < enab->dten_ndesc; i++) {
11588 enab->dten_current = enab->dten_desc[i];
11589 (void) dtrace_probe_enable(NULL, enab);
11590 }
11591
11592 enab->dten_primed = 1;
11593 }
11594 }
11595
11596 /*
11597 * Called to indicate that probes should be provided due to retained
11598 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
 * must take an initial lap through the enablings, calling the dtps_provide()
11600 * entry point explicitly to allow for autocreated probes.
11601 */
11602 static void
11603 dtrace_enabling_provide(dtrace_provider_t *prv)
11604 {
11605 int i, all = 0;
11606 dtrace_probedesc_t desc;
11607
11608 ASSERT(MUTEX_HELD(&dtrace_lock));
11609 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
11610
11611 if (prv == NULL) {
11612 all = 1;
11613 prv = dtrace_provider;
11614 }
11615
11616 do {
11617 dtrace_enabling_t *enab = dtrace_retained;
11618 void *parg = prv->dtpv_arg;
11619
11620 for (; enab != NULL; enab = enab->dten_next) {
11621 for (i = 0; i < enab->dten_ndesc; i++) {
11622 desc = enab->dten_desc[i]->dted_probe;
11623 mutex_exit(&dtrace_lock);
11624 prv->dtpv_pops.dtps_provide(parg, &desc);
11625 mutex_enter(&dtrace_lock);
11626 }
11627 }
11628 } while (all && (prv = prv->dtpv_next) != NULL);
11629
11630 mutex_exit(&dtrace_lock);
11631 dtrace_probe_provide(NULL, all ? NULL : prv);
11632 mutex_enter(&dtrace_lock);
11633 }
11634
11635 /*
11636 * DTrace DOF Functions
11637 */
11638 /*ARGSUSED*/
11639 static void
11640 dtrace_dof_error(dof_hdr_t *dof, const char *str)
11641 {
11642 if (dtrace_err_verbose)
11643 cmn_err(CE_WARN, "failed to process DOF: %s", str);
11644
11645 #ifdef DTRACE_ERRDEBUG
11646 dtrace_errdebug(str);
11647 #endif
11648 }
11649
11650 /*
11651 * Create DOF out of a currently enabled state. Right now, we only create
11652 * DOF containing the run-time options -- but this could be expanded to create
11653 * complete DOF representing the enabled state.
11654 */
11655 static dof_hdr_t *
11656 dtrace_dof_create(dtrace_state_t *state)
11657 {
11658 dof_hdr_t *dof;
11659 dof_sec_t *sec;
11660 dof_optdesc_t *opt;
11661 int i, len = sizeof (dof_hdr_t) +
11662 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
11663 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11664
11665 ASSERT(MUTEX_HELD(&dtrace_lock));
11666
11667 dof = kmem_zalloc(len, KM_SLEEP);
11668 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
11669 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
11670 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
11671 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
11672
11673 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
11674 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
11675 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
11676 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
11677 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
11678 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
11679
11680 dof->dofh_flags = 0;
11681 dof->dofh_hdrsize = sizeof (dof_hdr_t);
11682 dof->dofh_secsize = sizeof (dof_sec_t);
11683 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
11684 dof->dofh_secoff = sizeof (dof_hdr_t);
11685 dof->dofh_loadsz = len;
11686 dof->dofh_filesz = len;
11687 dof->dofh_pad = 0;
11688
11689 /*
11690 * Fill in the option section header...
11691 */
11692 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
11693 sec->dofs_type = DOF_SECT_OPTDESC;
11694 sec->dofs_align = sizeof (uint64_t);
11695 sec->dofs_flags = DOF_SECF_LOAD;
11696 sec->dofs_entsize = sizeof (dof_optdesc_t);
11697
11698 opt = (dof_optdesc_t *)((uintptr_t)sec +
11699 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
11700
11701 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
11702 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11703
11704 for (i = 0; i < DTRACEOPT_MAX; i++) {
11705 opt[i].dofo_option = i;
11706 opt[i].dofo_strtab = DOF_SECIDX_NONE;
11707 opt[i].dofo_value = state->dts_options[i];
11708 }
11709
11710 return (dof);
11711 }
11712
11713 static dof_hdr_t *
11714 dtrace_dof_copyin(uintptr_t uarg, int *errp)
11715 {
11716 dof_hdr_t hdr, *dof;
11717
11718 ASSERT(!MUTEX_HELD(&dtrace_lock));
11719
11720 /*
11721 * First, we're going to copyin() the sizeof (dof_hdr_t).
11722 */
11723 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
11724 dtrace_dof_error(NULL, "failed to copyin DOF header");
11725 *errp = EFAULT;
11726 return (NULL);
11727 }
11728
11729 /*
11730 * Now we'll allocate the entire DOF and copy it in -- provided
11731 * that the length isn't outrageous.
11732 */
11733 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
11734 dtrace_dof_error(&hdr, "load size exceeds maximum");
11735 *errp = E2BIG;
11736 return (NULL);
11737 }
11738
11739 if (hdr.dofh_loadsz < sizeof (hdr)) {
11740 dtrace_dof_error(&hdr, "invalid load size");
11741 *errp = EINVAL;
11742 return (NULL);
11743 }
11744
11745 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11746
11747 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
11748 dof->dofh_loadsz != hdr.dofh_loadsz) {
11749 kmem_free(dof, hdr.dofh_loadsz);
11750 *errp = EFAULT;
11751 return (NULL);
11752 }
11753
11754 return (dof);
11755 }
11756
11757 #if 0
11758 #if !defined(sun)
11759 static __inline uchar_t
11760 dtrace_dof_char(char c) {
11761 switch (c) {
11762 case '0':
11763 case '1':
11764 case '2':
11765 case '3':
11766 case '4':
11767 case '5':
11768 case '6':
11769 case '7':
11770 case '8':
11771 case '9':
11772 return (c - '0');
11773 case 'A':
11774 case 'B':
11775 case 'C':
11776 case 'D':
11777 case 'E':
11778 case 'F':
11779 return (c - 'A' + 10);
11780 case 'a':
11781 case 'b':
11782 case 'c':
11783 case 'd':
11784 case 'e':
11785 case 'f':
11786 return (c - 'a' + 10);
11787 }
11788 /* Should not reach here. */
11789 return (0);
11790 }
11791 #endif
11792 #endif
11793
11794 static dof_hdr_t *
11795 dtrace_dof_property(const char *name)
11796 {
11797 dof_hdr_t *dof = NULL;
11798 #if defined(sun)
11799 uchar_t *buf;
11800 uint64_t loadsz;
11801 unsigned int len, i;
11802
11803 /*
11804 * Unfortunately, array of values in .conf files are always (and
11805 * only) interpreted to be integer arrays. We must read our DOF
11806 * as an integer array, and then squeeze it into a byte array.
11807 */
11808 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
11809 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
11810 return (NULL);
11811
11812 for (i = 0; i < len; i++)
11813 buf[i] = (uchar_t)(((int *)buf)[i]);
11814
11815 if (len < sizeof (dof_hdr_t)) {
11816 ddi_prop_free(buf);
11817 dtrace_dof_error(NULL, "truncated header");
11818 return (NULL);
11819 }
11820
11821 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
11822 ddi_prop_free(buf);
11823 dtrace_dof_error(NULL, "truncated DOF");
11824 return (NULL);
11825 }
11826
11827 if (loadsz >= dtrace_dof_maxsize) {
11828 ddi_prop_free(buf);
11829 dtrace_dof_error(NULL, "oversized DOF");
11830 return (NULL);
11831 }
11832
11833 dof = kmem_alloc(loadsz, KM_SLEEP);
11834 bcopy(buf, dof, loadsz);
11835 ddi_prop_free(buf);
11836 #else
11837 printf("dtrace: XXX %s not implemented (name=%s)\n", __func__, name);
11838 #if 0 /* XXX TBD dtrace_dof_provide */
11839 char *p;
11840 char *p_env;
11841
11842 if ((p_env = getenv(name)) == NULL)
11843 return (NULL);
11844
11845 len = strlen(p_env) / 2;
11846
11847 buf = kmem_alloc(len, KM_SLEEP);
11848
11849 dof = (dof_hdr_t *) buf;
11850
11851 p = p_env;
11852
11853 for (i = 0; i < len; i++) {
11854 buf[i] = (dtrace_dof_char(p[0]) << 4) |
11855 dtrace_dof_char(p[1]);
11856 p += 2;
11857 }
11858
11859 freeenv(p_env);
11860
11861 if (len < sizeof (dof_hdr_t)) {
11862 kmem_free(buf, len);
11863 dtrace_dof_error(NULL, "truncated header");
11864 return (NULL);
11865 }
11866
11867 if (len < (loadsz = dof->dofh_loadsz)) {
11868 kmem_free(buf, len);
11869 dtrace_dof_error(NULL, "truncated DOF");
11870 return (NULL);
11871 }
11872
11873 if (loadsz >= dtrace_dof_maxsize) {
11874 kmem_free(buf, len);
11875 dtrace_dof_error(NULL, "oversized DOF");
11876 return (NULL);
11877 }
11878 #endif
11879 #endif
11880
11881 return (dof);
11882 }
11883
11884 static void
11885 dtrace_dof_destroy(dof_hdr_t *dof)
11886 {
11887 kmem_free(dof, dof->dofh_loadsz);
11888 }
11889
11890 /*
11891 * Return the dof_sec_t pointer corresponding to a given section index. If the
11892 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
11893 * a type other than DOF_SECT_NONE is specified, the header is checked against
11894 * this type and NULL is returned if the types do not match.
11895 */
11896 static dof_sec_t *
11897 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
11898 {
	dof_sec_t *sec;

	if (i >= dof->dofh_secnum) {
		dtrace_dof_error(dof, "referenced section index is invalid");
		return (NULL);
	}

	/*
	 * Compute the section header address only after the index has been
	 * validated, so that we never form an out-of-range pointer.
	 */
	sec = (dof_sec_t *)(uintptr_t)
	    ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
11906
11907 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
11908 dtrace_dof_error(dof, "referenced section is not loadable");
11909 return (NULL);
11910 }
11911
11912 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
11913 dtrace_dof_error(dof, "referenced section is the wrong type");
11914 return (NULL);
11915 }
11916
11917 return (sec);
11918 }
11919
11920 static dtrace_probedesc_t *
11921 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
11922 {
11923 dof_probedesc_t *probe;
11924 dof_sec_t *strtab;
11925 uintptr_t daddr = (uintptr_t)dof;
11926 uintptr_t str;
11927 size_t size;
11928
11929 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
11930 dtrace_dof_error(dof, "invalid probe section");
11931 return (NULL);
11932 }
11933
11934 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11935 dtrace_dof_error(dof, "bad alignment in probe description");
11936 return (NULL);
11937 }
11938
11939 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
11940 dtrace_dof_error(dof, "truncated probe description");
11941 return (NULL);
11942 }
11943
11944 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
11945 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
11946
11947 if (strtab == NULL)
11948 return (NULL);
11949
11950 str = daddr + strtab->dofs_offset;
11951 size = strtab->dofs_size;
11952
11953 if (probe->dofp_provider >= strtab->dofs_size) {
11954 dtrace_dof_error(dof, "corrupt probe provider");
11955 return (NULL);
11956 }
11957
11958 (void) strncpy(desc->dtpd_provider,
11959 (char *)(str + probe->dofp_provider),
11960 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
11961
11962 if (probe->dofp_mod >= strtab->dofs_size) {
11963 dtrace_dof_error(dof, "corrupt probe module");
11964 return (NULL);
11965 }
11966
11967 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
11968 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
11969
11970 if (probe->dofp_func >= strtab->dofs_size) {
11971 dtrace_dof_error(dof, "corrupt probe function");
11972 return (NULL);
11973 }
11974
11975 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
11976 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
11977
11978 if (probe->dofp_name >= strtab->dofs_size) {
11979 dtrace_dof_error(dof, "corrupt probe name");
11980 return (NULL);
11981 }
11982
11983 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
11984 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
11985
11986 return (desc);
11987 }
11988
11989 static dtrace_difo_t *
11990 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11991 cred_t *cr)
11992 {
11993 dtrace_difo_t *dp;
11994 size_t ttl = 0;
11995 dof_difohdr_t *dofd;
11996 uintptr_t daddr = (uintptr_t)dof;
11997 size_t maxx = dtrace_difo_maxsize;
11998 int i, l, n;
11999
12000 static const struct {
12001 int section;
12002 int bufoffs;
12003 int lenoffs;
12004 int entsize;
12005 int align;
12006 const char *msg;
12007 } difo[] = {
12008 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
12009 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
12010 sizeof (dif_instr_t), "multiple DIF sections" },
12011
12012 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
12013 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
12014 sizeof (uint64_t), "multiple integer tables" },
12015
12016 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
12017 offsetof(dtrace_difo_t, dtdo_strlen), 0,
12018 sizeof (char), "multiple string tables" },
12019
12020 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
12021 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
12022 sizeof (uint_t), "multiple variable tables" },
12023
12024 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
12025 };
12026
12027 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
12028 dtrace_dof_error(dof, "invalid DIFO header section");
12029 return (NULL);
12030 }
12031
12032 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12033 dtrace_dof_error(dof, "bad alignment in DIFO header");
12034 return (NULL);
12035 }
12036
12037 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
12038 sec->dofs_size % sizeof (dof_secidx_t)) {
12039 dtrace_dof_error(dof, "bad size in DIFO header");
12040 return (NULL);
12041 }
12042
12043 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12044 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
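	/*
	 * Note that dof_difohdr_t itself includes one dofd_links[] element,
	 * hence the "+ 1" above when computing the number of section links.
	 */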
12045
12046 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
12047 dp->dtdo_rtype = dofd->dofd_rtype;
12048
12049 for (l = 0; l < n; l++) {
12050 dof_sec_t *subsec;
12051 void **bufp;
12052 uint32_t *lenp;
12053
12054 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
12055 dofd->dofd_links[l])) == NULL)
12056 goto err; /* invalid section link */
12057
12058 if (ttl + subsec->dofs_size > maxx) {
12059 dtrace_dof_error(dof, "exceeds maximum size");
12060 goto err;
12061 }
12062
12063 ttl += subsec->dofs_size;
12064
12065 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
12066 if (subsec->dofs_type != difo[i].section)
12067 continue;
12068
12069 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
12070 dtrace_dof_error(dof, "section not loaded");
12071 goto err;
12072 }
12073
12074 if (subsec->dofs_align != difo[i].align) {
12075 dtrace_dof_error(dof, "bad alignment");
12076 goto err;
12077 }
12078
12079 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
12080 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
12081
12082 if (*bufp != NULL) {
12083 dtrace_dof_error(dof, difo[i].msg);
12084 goto err;
12085 }
12086
12087 if (difo[i].entsize != subsec->dofs_entsize) {
12088 dtrace_dof_error(dof, "entry size mismatch");
12089 goto err;
12090 }
12091
12092 if (subsec->dofs_entsize != 0 &&
12093 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
12094 dtrace_dof_error(dof, "corrupt entry size");
12095 goto err;
12096 }
12097
12098 *lenp = subsec->dofs_size;
12099 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
12100 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
12101 *bufp, subsec->dofs_size);
12102
12103 if (subsec->dofs_entsize != 0)
12104 *lenp /= subsec->dofs_entsize;
12105
12106 break;
12107 }
12108
12109 /*
12110 * If we encounter a loadable DIFO sub-section that is not
12111 * known to us, assume this is a broken program and fail.
12112 */
12113 if (difo[i].section == DOF_SECT_NONE &&
12114 (subsec->dofs_flags & DOF_SECF_LOAD)) {
12115 dtrace_dof_error(dof, "unrecognized DIFO subsection");
12116 goto err;
12117 }
12118 }
12119
12120 if (dp->dtdo_buf == NULL) {
12121 /*
12122 * We can't have a DIF object without DIF text.
12123 */
12124 dtrace_dof_error(dof, "missing DIF text");
12125 goto err;
12126 }
12127
12128 /*
12129 * Before we validate the DIF object, run through the variable table
	 * looking for the strings -- if any of their sizes are zero, we'll
	 * set their size to be the system-wide default string size. Note that
12131 * their size to be the system-wide default string size. Note that
12132 * this should _not_ happen if the "strsize" option has been set --
12133 * in this case, the compiler should have set the size to reflect the
12134 * setting of the option.
12135 */
12136 for (i = 0; i < dp->dtdo_varlen; i++) {
12137 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12138 dtrace_diftype_t *t = &v->dtdv_type;
12139
12140 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12141 continue;
12142
12143 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12144 t->dtdt_size = dtrace_strsize_default;
12145 }
12146
12147 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12148 goto err;
12149
12150 dtrace_difo_init(dp, vstate);
12151 return (dp);
12152
12153 err:
12154 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12155 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12156 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12157 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12158
12159 kmem_free(dp, sizeof (dtrace_difo_t));
12160 return (NULL);
12161 }
12162
12163 static dtrace_predicate_t *
12164 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12165 cred_t *cr)
12166 {
12167 dtrace_difo_t *dp;
12168
12169 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12170 return (NULL);
12171
12172 return (dtrace_predicate_create(dp));
12173 }
12174
12175 static dtrace_actdesc_t *
12176 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12177 cred_t *cr)
12178 {
12179 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12180 dof_actdesc_t *desc;
12181 dof_sec_t *difosec;
12182 size_t offs;
12183 uintptr_t daddr = (uintptr_t)dof;
12184 uint64_t arg;
12185 dtrace_actkind_t kind;
12186
12187 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12188 dtrace_dof_error(dof, "invalid action section");
12189 return (NULL);
12190 }
12191
12192 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12193 dtrace_dof_error(dof, "truncated action description");
12194 return (NULL);
12195 }
12196
12197 if (sec->dofs_align != sizeof (uint64_t)) {
12198 dtrace_dof_error(dof, "bad alignment in action description");
12199 return (NULL);
12200 }
12201
12202 if (sec->dofs_size < sec->dofs_entsize) {
12203 dtrace_dof_error(dof, "section entry size exceeds total size");
12204 return (NULL);
12205 }
12206
12207 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12208 dtrace_dof_error(dof, "bad entry size in action description");
12209 return (NULL);
12210 }
12211
12212 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12213 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12214 return (NULL);
12215 }
12216
12217 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12218 desc = (dof_actdesc_t *)(daddr +
12219 (uintptr_t)sec->dofs_offset + offs);
12220 kind = (dtrace_actkind_t)desc->dofa_kind;
12221
12222 if (DTRACEACT_ISPRINTFLIKE(kind) &&
12223 (kind != DTRACEACT_PRINTA ||
12224 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12225 dof_sec_t *strtab;
12226 char *str, *fmt;
12227 uint64_t i;
12228
12229 /*
12230 * printf()-like actions must have a format string.
12231 */
12232 if ((strtab = dtrace_dof_sect(dof,
12233 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
12234 goto err;
12235
12236 str = (char *)((uintptr_t)dof +
12237 (uintptr_t)strtab->dofs_offset);
12238
12239 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
12240 if (str[i] == '\0')
12241 break;
12242 }
12243
12244 if (i >= strtab->dofs_size) {
12245 dtrace_dof_error(dof, "bogus format string");
12246 goto err;
12247 }
12248
12249 if (i == desc->dofa_arg) {
12250 dtrace_dof_error(dof, "empty format string");
12251 goto err;
12252 }
12253
12254 i -= desc->dofa_arg;
12255 fmt = kmem_alloc(i + 1, KM_SLEEP);
12256 bcopy(&str[desc->dofa_arg], fmt, i + 1);
12257 arg = (uint64_t)(uintptr_t)fmt;
12258 } else {
12259 if (kind == DTRACEACT_PRINTA) {
12260 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
12261 arg = 0;
12262 } else {
12263 arg = desc->dofa_arg;
12264 }
12265 }
12266
12267 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
12268 desc->dofa_uarg, arg);
12269
12270 if (last != NULL) {
12271 last->dtad_next = act;
12272 } else {
12273 first = act;
12274 }
12275
12276 last = act;
12277
12278 if (desc->dofa_difo == DOF_SECIDX_NONE)
12279 continue;
12280
12281 if ((difosec = dtrace_dof_sect(dof,
12282 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
12283 goto err;
12284
12285 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
12286
12287 if (act->dtad_difo == NULL)
12288 goto err;
12289 }
12290
12291 ASSERT(first != NULL);
12292 return (first);
12293
12294 err:
12295 for (act = first; act != NULL; act = next) {
12296 next = act->dtad_next;
12297 dtrace_actdesc_release(act, vstate);
12298 }
12299
12300 return (NULL);
12301 }
12302
12303 static dtrace_ecbdesc_t *
12304 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12305 cred_t *cr)
12306 {
12307 dtrace_ecbdesc_t *ep;
12308 dof_ecbdesc_t *ecb;
12309 dtrace_probedesc_t *desc;
12310 dtrace_predicate_t *pred = NULL;
12311
12312 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
12313 dtrace_dof_error(dof, "truncated ECB description");
12314 return (NULL);
12315 }
12316
12317 if (sec->dofs_align != sizeof (uint64_t)) {
12318 dtrace_dof_error(dof, "bad alignment in ECB description");
12319 return (NULL);
12320 }
12321
12322 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
12323 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
12324
12325 if (sec == NULL)
12326 return (NULL);
12327
12328 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12329 ep->dted_uarg = ecb->dofe_uarg;
12330 desc = &ep->dted_probe;
12331
12332 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
12333 goto err;
12334
12335 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
12336 if ((sec = dtrace_dof_sect(dof,
12337 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
12338 goto err;
12339
12340 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
12341 goto err;
12342
12343 ep->dted_pred.dtpdd_predicate = pred;
12344 }
12345
12346 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
12347 if ((sec = dtrace_dof_sect(dof,
12348 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
12349 goto err;
12350
12351 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
12352
12353 if (ep->dted_action == NULL)
12354 goto err;
12355 }
12356
12357 return (ep);
12358
12359 err:
12360 if (pred != NULL)
12361 dtrace_predicate_release(pred, vstate);
12362 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12363 return (NULL);
12364 }
12365
12366 /*
12367 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
12368 * specified DOF. At present, this amounts to simply adding 'ubase' to the
12369 * site of any user SETX relocations to account for load object base address.
12370 * In the future, if we need other relocations, this function can be extended.
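 *
 * For example, a 64-bit slot that the compiler emitted as 0x1000 becomes
 * 0x7f0000001000 once its load object is mapped at base 0x7f0000000000.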
12371 */
12372 static int
12373 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
12374 {
12375 uintptr_t daddr = (uintptr_t)dof;
12376 dof_relohdr_t *dofr =
12377 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12378 dof_sec_t *ss, *rs, *ts;
12379 dof_relodesc_t *r;
12380 uint_t i, n;
12381
12382 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
12383 sec->dofs_align != sizeof (dof_secidx_t)) {
12384 dtrace_dof_error(dof, "invalid relocation header");
12385 return (-1);
12386 }
12387
12388 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
12389 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
12390 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
12391
12392 if (ss == NULL || rs == NULL || ts == NULL)
12393 return (-1); /* dtrace_dof_error() has been called already */
12394
12395 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
12396 rs->dofs_align != sizeof (uint64_t)) {
12397 dtrace_dof_error(dof, "invalid relocation section");
12398 return (-1);
12399 }
12400
12401 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
12402 n = rs->dofs_size / rs->dofs_entsize;
12403
12404 for (i = 0; i < n; i++) {
12405 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
12406
12407 switch (r->dofr_type) {
12408 case DOF_RELO_NONE:
12409 break;
12410 case DOF_RELO_SETX:
12411 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
12412 sizeof (uint64_t) > ts->dofs_size) {
12413 dtrace_dof_error(dof, "bad relocation offset");
12414 return (-1);
12415 }
12416
12417 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
12418 dtrace_dof_error(dof, "misaligned setx relo");
12419 return (-1);
12420 }
12421
12422 *(uint64_t *)taddr += ubase;
12423 break;
12424 default:
12425 dtrace_dof_error(dof, "invalid relocation type");
12426 return (-1);
12427 }
12428
12429 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
12430 }
12431
12432 return (0);
12433 }
12434
12435 /*
12436 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
12437 * header: it should be at the front of a memory region that is at least
12438 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
12439 * size. It need not be validated in any other way.
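 *
 * Processing makes three passes over the section headers: the first
 * validates them (rejecting probe-related sections when 'noprobes' is set),
 * the second applies any user relocations, and the third gathers ECB
 * descriptions into the enabling.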
12440 */
12441 static int
12442 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
12443 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
12444 {
12445 uint64_t len = dof->dofh_loadsz, seclen;
12446 uintptr_t daddr = (uintptr_t)dof;
12447 dtrace_ecbdesc_t *ep;
12448 dtrace_enabling_t *enab;
12449 uint_t i;
12450
12451 ASSERT(MUTEX_HELD(&dtrace_lock));
12452 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
12453
12454 /*
12455 * Check the DOF header identification bytes. In addition to checking
12456 * valid settings, we also verify that unused bits/bytes are zeroed so
12457 * we can use them later without fear of regressing existing binaries.
12458 */
12459 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
12460 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
12461 dtrace_dof_error(dof, "DOF magic string mismatch");
12462 return (-1);
12463 }
12464
12465 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
12466 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
12467 dtrace_dof_error(dof, "DOF has invalid data model");
12468 return (-1);
12469 }
12470
12471 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
12472 dtrace_dof_error(dof, "DOF encoding mismatch");
12473 return (-1);
12474 }
12475
12476 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
12477 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
12478 dtrace_dof_error(dof, "DOF version mismatch");
12479 return (-1);
12480 }
12481
12482 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
12483 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
12484 return (-1);
12485 }
12486
12487 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
12488 dtrace_dof_error(dof, "DOF uses too many integer registers");
12489 return (-1);
12490 }
12491
12492 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
12493 dtrace_dof_error(dof, "DOF uses too many tuple registers");
12494 return (-1);
12495 }
12496
12497 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
12498 if (dof->dofh_ident[i] != 0) {
12499 dtrace_dof_error(dof, "DOF has invalid ident byte set");
12500 return (-1);
12501 }
12502 }
12503
12504 if (dof->dofh_flags & ~DOF_FL_VALID) {
12505 dtrace_dof_error(dof, "DOF has invalid flag bits set");
12506 return (-1);
12507 }
12508
12509 if (dof->dofh_secsize == 0) {
12510 dtrace_dof_error(dof, "zero section header size");
12511 return (-1);
12512 }
12513
12514 /*
12515 * Check that the section headers don't exceed the amount of DOF
12516 * data. Note that we cast the section size and number of sections
12517 * to uint64_t's to prevent possible overflow in the multiplication.
12518 */
12519 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
12520
12521 if (dof->dofh_secoff > len || seclen > len ||
12522 dof->dofh_secoff + seclen > len) {
12523 dtrace_dof_error(dof, "truncated section headers");
12524 return (-1);
12525 }
12526
12527 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
12528 dtrace_dof_error(dof, "misaligned section headers");
12529 return (-1);
12530 }
12531
12532 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
12533 dtrace_dof_error(dof, "misaligned section size");
12534 return (-1);
12535 }
12536
12537 /*
12538 * Take an initial pass through the section headers to be sure that
12539 * the headers don't have stray offsets. If the 'noprobes' flag is
	 * set, do not permit sections relating to providers, probes, probe
	 * arguments, or probe offsets.
12541 */
12542 for (i = 0; i < dof->dofh_secnum; i++) {
12543 dof_sec_t *sec = (dof_sec_t *)(daddr +
12544 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12545
12546 if (noprobes) {
12547 switch (sec->dofs_type) {
12548 case DOF_SECT_PROVIDER:
12549 case DOF_SECT_PROBES:
12550 case DOF_SECT_PRARGS:
12551 case DOF_SECT_PROFFS:
12552 dtrace_dof_error(dof, "illegal sections "
12553 "for enabling");
12554 return (-1);
12555 }
12556 }
12557
12558 if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
12559 !(sec->dofs_flags & DOF_SECF_LOAD)) {
12560 dtrace_dof_error(dof, "loadable section with load "
12561 "flag unset");
12562 return (-1);
12563 }
12564
12565 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12566 continue; /* just ignore non-loadable sections */
12567
12568 if (sec->dofs_align & (sec->dofs_align - 1)) {
12569 dtrace_dof_error(dof, "bad section alignment");
12570 return (-1);
12571 }
12572
12573 if (sec->dofs_offset & (sec->dofs_align - 1)) {
12574 dtrace_dof_error(dof, "misaligned section");
12575 return (-1);
12576 }
12577
12578 if (sec->dofs_offset > len || sec->dofs_size > len ||
12579 sec->dofs_offset + sec->dofs_size > len) {
12580 dtrace_dof_error(dof, "corrupt section header");
12581 return (-1);
12582 }
12583
12584 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
12585 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
12586 dtrace_dof_error(dof, "non-terminating string table");
12587 return (-1);
12588 }
12589 }
12590
12591 /*
12592 * Take a second pass through the sections and locate and perform any
12593 * relocations that are present. We do this after the first pass to
12594 * be sure that all sections have had their headers validated.
12595 */
12596 for (i = 0; i < dof->dofh_secnum; i++) {
12597 dof_sec_t *sec = (dof_sec_t *)(daddr +
12598 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12599
12600 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12601 continue; /* skip sections that are not loadable */
12602
12603 switch (sec->dofs_type) {
12604 case DOF_SECT_URELHDR:
12605 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
12606 return (-1);
12607 break;
12608 }
12609 }
12610
12611 if ((enab = *enabp) == NULL)
12612 enab = *enabp = dtrace_enabling_create(vstate);
12613
12614 for (i = 0; i < dof->dofh_secnum; i++) {
12615 dof_sec_t *sec = (dof_sec_t *)(daddr +
12616 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12617
12618 if (sec->dofs_type != DOF_SECT_ECBDESC)
12619 continue;
12620
12621 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
12622 dtrace_enabling_destroy(enab);
12623 *enabp = NULL;
12624 return (-1);
12625 }
12626
12627 dtrace_enabling_add(enab, ep);
12628 }
12629
12630 return (0);
12631 }
12632
12633 /*
12634 * Process DOF for any options. This routine assumes that the DOF has been
12635 * at least processed by dtrace_dof_slurp().
12636 */
12637 static int
12638 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
12639 {
12640 int i, rval;
12641 uint32_t entsize;
12642 size_t offs;
12643 dof_optdesc_t *desc;
12644
12645 for (i = 0; i < dof->dofh_secnum; i++) {
12646 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
12647 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12648
12649 if (sec->dofs_type != DOF_SECT_OPTDESC)
12650 continue;
12651
12652 if (sec->dofs_align != sizeof (uint64_t)) {
12653 dtrace_dof_error(dof, "bad alignment in "
12654 "option description");
12655 return (EINVAL);
12656 }
12657
12658 if ((entsize = sec->dofs_entsize) == 0) {
12659 dtrace_dof_error(dof, "zeroed option entry size");
12660 return (EINVAL);
12661 }
12662
12663 if (entsize < sizeof (dof_optdesc_t)) {
12664 dtrace_dof_error(dof, "bad option entry size");
12665 return (EINVAL);
12666 }
12667
12668 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
12669 desc = (dof_optdesc_t *)((uintptr_t)dof +
12670 (uintptr_t)sec->dofs_offset + offs);
12671
12672 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
12673 dtrace_dof_error(dof, "non-zero option string");
12674 return (EINVAL);
12675 }
12676
12677 if (desc->dofo_value == DTRACEOPT_UNSET) {
12678 dtrace_dof_error(dof, "unset option");
12679 return (EINVAL);
12680 }
12681
12682 if ((rval = dtrace_state_option(state,
12683 desc->dofo_option, desc->dofo_value)) != 0) {
12684 dtrace_dof_error(dof, "rejected option");
12685 return (rval);
12686 }
12687 }
12688 }
12689
12690 return (0);
12691 }
12692
12693 /*
12694 * DTrace Consumer State Functions
12695 */
12696 static int
12697 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
12698 {
12699 size_t hashsize, maxper, minn, chunksize = dstate->dtds_chunksize;
12700 void *base;
12701 uintptr_t limit;
12702 dtrace_dynvar_t *dvar, *next, *start;
12703 int i;
12704
12705 ASSERT(MUTEX_HELD(&dtrace_lock));
12706 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
12707
12708 bzero(dstate, sizeof (dtrace_dstate_t));
12709
12710 if ((dstate->dtds_chunksize = chunksize) == 0)
12711 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
12712
12713 if (size < (minn = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
12714 size = minn;
12715
12716 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12717 return (ENOMEM);
12718
12719 dstate->dtds_size = size;
12720 dstate->dtds_base = base;
12721 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
12722 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
12723
12724 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
12725
12726 if (hashsize != 1 && (hashsize & 1))
12727 hashsize--;
12728
12729 dstate->dtds_hashsize = hashsize;
12730 dstate->dtds_hash = dstate->dtds_base;
12731
12732 /*
12733 * Set all of our hash buckets to point to the single sink, and (if
12734 * it hasn't already been set), set the sink's hash value to be the
12735 * sink sentinel value. The sink is needed for dynamic variable
12736 * lookups to know that they have iterated over an entire, valid hash
12737 * chain.
12738 */
12739 for (i = 0; i < hashsize; i++)
12740 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
12741
12742 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
12743 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
12744
12745 /*
	 * Carve the remaining space into per-CPU free lists, dividing it
	 * evenly among all NCPU possible CPUs.
12748 */
12749 start = (dtrace_dynvar_t *)
12750 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
12751 limit = (uintptr_t)base + size;
12752
12753 maxper = (limit - (uintptr_t)start) / NCPU;
12754 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
12755
12756 for (i = 0; i < NCPU; i++) {
12757 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
12758
12759 /*
12760 * If we don't even have enough chunks to make it once through
12761 * NCPUs, we're just going to allocate everything to the first
12762 * CPU. And if we're on the last CPU, we're going to allocate
12763 * whatever is left over. In either case, we set the limit to
12764 * be the limit of the dynamic variable space.
12765 */
12766 if (maxper == 0 || i == NCPU - 1) {
12767 limit = (uintptr_t)base + size;
12768 start = NULL;
12769 } else {
12770 limit = (uintptr_t)start + maxper;
12771 start = (dtrace_dynvar_t *)limit;
12772 }
12773
12774 ASSERT(limit <= (uintptr_t)base + size);
12775
12776 for (;;) {
12777 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
12778 dstate->dtds_chunksize);
12779
12780 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
12781 break;
12782
12783 dvar->dtdv_next = next;
12784 dvar = next;
12785 }
12786
12787 if (maxper == 0)
12788 break;
12789 }
12790
12791 return (0);
12792 }
12793
12794 static void
12795 dtrace_dstate_fini(dtrace_dstate_t *dstate)
12796 {
12797 ASSERT(MUTEX_HELD(&cpu_lock));
12798
12799 if (dstate->dtds_base == NULL)
12800 return;
12801
12802 kmem_free(dstate->dtds_base, dstate->dtds_size);
12803 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
12804 }
12805
12806 static void
12807 dtrace_vstate_fini(dtrace_vstate_t *vstate)
12808 {
12809 /*
12810 * Logical XOR, where are you?
12811 */
12812 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
12813
12814 if (vstate->dtvs_nglobals > 0) {
12815 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
12816 sizeof (dtrace_statvar_t *));
12817 }
12818
12819 if (vstate->dtvs_ntlocals > 0) {
12820 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
12821 sizeof (dtrace_difv_t));
12822 }
12823
12824 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
12825
12826 if (vstate->dtvs_nlocals > 0) {
12827 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
12828 sizeof (dtrace_statvar_t *));
12829 }
12830 }
12831
12832 static void
12833 dtrace_state_clean(dtrace_state_t *state)
12834 {
12835 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12836 return;
12837
12838 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12839 dtrace_speculation_clean(state);
12840 }
12841
12842 static void
12843 dtrace_state_deadman(dtrace_state_t *state)
12844 {
12845 hrtime_t now;
12846
12847 dtrace_sync();
12848
12849 now = dtrace_gethrtime();
12850
12851 if (state != dtrace_anon.dta_state &&
12852 now - state->dts_laststatus >= dtrace_deadman_user)
12853 return;
12854
12855 /*
12856 * We must be sure that dts_alive never appears to be less than the
12857 * value upon entry to dtrace_state_deadman(), and because we lack a
12858 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12859 * store INT64_MAX to it, followed by a memory barrier, followed by
12860 * the new value. This assures that dts_alive never appears to be
12861 * less than its true value, regardless of the order in which the
12862 * stores to the underlying storage are issued.
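	 * Even if a racing 64-bit load tears between the two stores, every
	 * observable combination of halves is at least the value dts_alive
	 * held on entry, which is all the deadman check requires.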
12863 */
12864 state->dts_alive = INT64_MAX;
12865 dtrace_membar_producer();
12866 state->dts_alive = now;
12867 }
12868
12869 #if !defined(sun)
12870 struct dtrace_state_worker *dtrace_state_worker_add(void (*)(dtrace_state_t *),
12871 dtrace_state_t *, hrtime_t);
12872 void dtrace_state_worker_remove(struct dtrace_state_worker *);
12873 #endif
12874
12875 static dtrace_state_t *
12876 #if defined(sun)
12877 dtrace_state_create(dev_t *devp, cred_t *cr)
12878 #else
12879 dtrace_state_create(dev_t dev, cred_t *cr)
12880 #endif
12881 {
12882 #if defined(sun)
12883 minor_t minor;
12884 major_t major;
12885 #else
12886 int m = 0;
12887 #endif
12888 char c[30];
12889 dtrace_state_t *state;
12890 dtrace_optval_t *opt;
12891 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
12892
12893 ASSERT(MUTEX_HELD(&dtrace_lock));
12894 ASSERT(MUTEX_HELD(&cpu_lock));
12895
12896 #if defined(sun)
12897 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
12898 VM_BESTFIT | VM_SLEEP);
12899
12900 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
12901 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12902 return (NULL);
12903 }
12904
12905 state = ddi_get_soft_state(dtrace_softstate, minor);
12906 #else
12907 m = minor(dev) & 0x0F;
12908
12909 /* Allocate memory for the state. */
12910 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP);
12911 #endif
12912
12913 state->dts_epid = DTRACE_EPIDNONE + 1;
12914
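	/*
	 * Create the per-state aggregation ID arena. Aggregation IDs begin
	 * at 1; an ID of 0 is reserved to mean "no aggregation".
	 */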
#if defined(sun)
	(void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
	state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
12919
12920 if (devp != NULL) {
12921 major = getemajor(*devp);
12922 } else {
12923 major = ddi_driver_major(dtrace_devi);
12924 }
12925
12926 state->dts_dev = makedevice(major, minor);
12927
12928 if (devp != NULL)
12929 *devp = state->dts_dev;
#else
	(void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m);
	state->dts_aggid_arena = vmem_create(c, 1, INT_MAX, 1,
12932 NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
12933 state->dts_dev = dev;
12934 #endif
12935
12936 /*
12937 * We allocate NCPU buffers. On the one hand, this can be quite
12938 * a bit of memory per instance (nearly 36K on a Starcat). On the
12939 * other hand, it saves an additional memory reference in the probe
12940 * path.
12941 */
12942 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
12943 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
12944
12945 #if defined(sun)
12946 state->dts_cleaner = CYCLIC_NONE;
12947 state->dts_deadman = CYCLIC_NONE;
12948 #else
12949 state->dts_cleaner = NULL;
12950 state->dts_deadman = NULL;
12951 #endif
12952 state->dts_vstate.dtvs_state = state;
12953
12954 for (i = 0; i < DTRACEOPT_MAX; i++)
12955 state->dts_options[i] = DTRACEOPT_UNSET;
12956
12957 /*
12958 * Set the default options.
12959 */
12960 opt = state->dts_options;
12961 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
12962 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
12963 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
12964 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
12965 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
12966 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
12967 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
12968 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
12969 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
12970 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
12971 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
12972 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
12973 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
12974 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
12975
12976 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
12977
12978 /*
12979 * Depending on the user credentials, we set flag bits which alter probe
12980 * visibility or the amount of destructiveness allowed. In the case of
12981 * actual anonymous tracing, or the possession of all privileges, all of
12982 * the normal checks are bypassed.
12983 */
12984 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
12985 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
12986 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
12987 } else {
12988 /*
12989 * Set up the credentials for this instantiation. We take a
12990 * hold on the credential to prevent it from disappearing on
12991 * us; this in turn prevents the zone_t referenced by this
12992 * credential from disappearing. This means that we can
12993 * examine the credential and the zone from probe context.
12994 */
12995 #if defined(sun)
12996 crhold(cr);
12997 #else
12998 kauth_cred_hold(cr);
12999 #endif
13000 state->dts_cred.dcr_cred = cr;
13001
13002 /*
13003 * CRA_PROC means "we have *some* privilege for dtrace" and
13004 * unlocks the use of variables like pid, zonename, etc.
13005 */
13006 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
13007 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13008 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
13009 }
13010
13011 /*
13012 * dtrace_user allows use of syscall and profile providers.
13013 * If the user also has proc_owner and/or proc_zone, we
13014 * extend the scope to include additional visibility and
13015 * destructive power.
13016 */
13017 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
13018 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
13019 state->dts_cred.dcr_visible |=
13020 DTRACE_CRV_ALLPROC;
13021
13022 state->dts_cred.dcr_action |=
13023 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13024 }
13025
13026 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
13027 state->dts_cred.dcr_visible |=
13028 DTRACE_CRV_ALLZONE;
13029
13030 state->dts_cred.dcr_action |=
13031 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13032 }
13033
13034 /*
13035 * If we have all privs in whatever zone this is,
13036 * we can do destructive things to processes which
13037 * have altered credentials.
13038 */
13039 #if defined(sun)
13040 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13041 cr->cr_zone->zone_privset)) {
13042 state->dts_cred.dcr_action |=
13043 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13044 }
13045 #endif
13046 }
13047
13048 /*
13049 * Holding the dtrace_kernel privilege also implies that
13050 * the user has the dtrace_user privilege from a visibility
13051 * perspective. But without further privileges, some
13052 * destructive actions are not available.
13053 */
13054 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
13055 /*
13056 * Make all probes in all zones visible. However,
13057 * this doesn't mean that all actions become available
13058 * to all zones.
13059 */
13060 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
13061 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
13062
13063 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
13064 DTRACE_CRA_PROC;
13065 /*
13066 * Holding proc_owner means that destructive actions
13067 * for *this* zone are allowed.
13068 */
13069 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13070 state->dts_cred.dcr_action |=
13071 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13072
13073 /*
13074 * Holding proc_zone means that destructive actions
			 * for this user/group ID in all zones are allowed.
13076 */
13077 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13078 state->dts_cred.dcr_action |=
13079 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13080
13081 #if defined(sun)
13082 /*
13083 * If we have all privs in whatever zone this is,
13084 * we can do destructive things to processes which
13085 * have altered credentials.
13086 */
13087 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13088 cr->cr_zone->zone_privset)) {
13089 state->dts_cred.dcr_action |=
13090 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13091 }
13092 #endif
13093 }
13094
13095 /*
13096 * Holding the dtrace_proc privilege gives control over fasttrap
13097 * and pid providers. We need to grant wider destructive
13098 * privileges in the event that the user has proc_owner and/or
13099 * proc_zone.
13100 */
13101 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13102 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13103 state->dts_cred.dcr_action |=
13104 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13105
13106 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13107 state->dts_cred.dcr_action |=
13108 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13109 }
13110 }
13111
13112 return (state);
13113 }
13114
13115 static int
13116 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13117 {
13118 dtrace_optval_t *opt = state->dts_options, size;
	processorid_t cpu = 0;
13120 int flags = 0, rval;
13121
13122 ASSERT(MUTEX_HELD(&dtrace_lock));
13123 ASSERT(MUTEX_HELD(&cpu_lock));
13124 ASSERT(which < DTRACEOPT_MAX);
13125 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13126 (state == dtrace_anon.dta_state &&
13127 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13128
13129 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13130 return (0);
13131
13132 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13133 cpu = opt[DTRACEOPT_CPU];
13134
13135 if (which == DTRACEOPT_SPECSIZE)
13136 flags |= DTRACEBUF_NOSWITCH;
13137
13138 if (which == DTRACEOPT_BUFSIZE) {
13139 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13140 flags |= DTRACEBUF_RING;
13141
13142 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13143 flags |= DTRACEBUF_FILL;
13144
13145 if (state != dtrace_anon.dta_state ||
13146 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13147 flags |= DTRACEBUF_INACTIVE;
13148 }
13149
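	/*
	 * Try the configured size first; each time the allocation fails
	 * with ENOMEM, halve the size and retry, unless the buffer resizing
	 * policy is manual.
	 */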
13150 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
13151 /*
13152 * The size must be 8-byte aligned. If the size is not 8-byte
13153 * aligned, drop it down by the difference.
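		 * (For example, a size of 1035 is dropped to 1032.)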
13154 */
13155 if (size & (sizeof (uint64_t) - 1))
13156 size -= size & (sizeof (uint64_t) - 1);
13157
13158 if (size < state->dts_reserve) {
13159 /*
			 * Buffers must always be large enough to accommodate
13161 * their prereserved space. We return E2BIG instead
13162 * of ENOMEM in this case to allow for user-level
13163 * software to differentiate the cases.
13164 */
13165 return (E2BIG);
13166 }
13167
13168 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
13169
13170 if (rval != ENOMEM) {
13171 opt[which] = size;
13172 return (rval);
13173 }
13174
13175 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13176 return (rval);
13177 }
13178
13179 return (ENOMEM);
13180 }
13181
13182 static int
13183 dtrace_state_buffers(dtrace_state_t *state)
13184 {
13185 dtrace_speculation_t *spec = state->dts_speculations;
13186 int rval, i;
13187
13188 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13189 DTRACEOPT_BUFSIZE)) != 0)
13190 return (rval);
13191
13192 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13193 DTRACEOPT_AGGSIZE)) != 0)
13194 return (rval);
13195
13196 for (i = 0; i < state->dts_nspeculations; i++) {
13197 if ((rval = dtrace_state_buffer(state,
13198 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13199 return (rval);
13200 }
13201
13202 return (0);
13203 }
13204
13205 static void
13206 dtrace_state_prereserve(dtrace_state_t *state)
13207 {
13208 dtrace_ecb_t *ecb;
13209 dtrace_probe_t *probe;
13210
13211 state->dts_reserve = 0;
13212
13213 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13214 return;
13215
13216 /*
13217 * If our buffer policy is a "fill" buffer policy, we need to set the
13218 * prereserved space to be the space required by the END probes.
13219 */
13220 probe = dtrace_probes[dtrace_probeid_end - 1];
13221 ASSERT(probe != NULL);
13222
13223 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13224 if (ecb->dte_state != state)
13225 continue;
13226
13227 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13228 }
13229 }
13230
13231 static int
13232 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13233 {
13234 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13235 dtrace_speculation_t *spec;
13236 dtrace_buffer_t *buf;
13237 #if defined(sun)
13238 cyc_handler_t hdlr;
13239 cyc_time_t when;
13240 #endif
13241 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13242 dtrace_icookie_t cookie;
13243
13244 mutex_enter(&cpu_lock);
13245 mutex_enter(&dtrace_lock);
13246
13247 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13248 rval = EBUSY;
13249 goto out;
13250 }
13251
13252 /*
13253 * Before we can perform any checks, we must prime all of the
13254 * retained enablings that correspond to this state.
13255 */
13256 dtrace_enabling_prime(state);
13257
13258 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13259 rval = EACCES;
13260 goto out;
13261 }
13262
13263 dtrace_state_prereserve(state);
13264
13265 /*
	 * Now we want to try to allocate our speculations.
13267 * We do not automatically resize the number of speculations; if
13268 * this fails, we will fail the operation.
13269 */
13270 nspec = opt[DTRACEOPT_NSPEC];
13271 ASSERT(nspec != DTRACEOPT_UNSET);
13272
13273 if (nspec > INT_MAX) {
13274 rval = ENOMEM;
13275 goto out;
13276 }
13277
13278 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
13279
13280 if (spec == NULL) {
13281 rval = ENOMEM;
13282 goto out;
13283 }
13284
13285 state->dts_speculations = spec;
13286 state->dts_nspeculations = (int)nspec;
13287
13288 for (i = 0; i < nspec; i++) {
13289 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
13290 rval = ENOMEM;
13291 goto err;
13292 }
13293
13294 spec[i].dtsp_buffer = buf;
13295 }
13296
13297 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13298 if (dtrace_anon.dta_state == NULL) {
13299 rval = ENOENT;
13300 goto out;
13301 }
13302
13303 if (state->dts_necbs != 0) {
13304 rval = EALREADY;
13305 goto out;
13306 }
13307
13308 state->dts_anon = dtrace_anon_grab();
13309 ASSERT(state->dts_anon != NULL);
13310 state = state->dts_anon;
13311
13312 /*
13313 * We want "grabanon" to be set in the grabbed state, so we'll
13314 * copy that option value from the grabbing state into the
13315 * grabbed state.
13316 */
13317 state->dts_options[DTRACEOPT_GRABANON] =
13318 opt[DTRACEOPT_GRABANON];
13319
13320 *cpu = dtrace_anon.dta_beganon;
13321
13322 /*
13323 * If the anonymous state is active (as it almost certainly
13324 * is if the anonymous enabling ultimately matched anything),
13325 * we don't allow any further option processing -- but we
13326 * don't return failure.
13327 */
13328 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13329 goto out;
13330 }
13331
13332 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13333 opt[DTRACEOPT_AGGSIZE] != 0) {
13334 if (state->dts_aggregations == NULL) {
13335 /*
13336 * We're not going to create an aggregation buffer
13337 * because we don't have any ECBs that contain
13338 * aggregations -- set this option to 0.
13339 */
13340 opt[DTRACEOPT_AGGSIZE] = 0;
13341 } else {
13342 /*
13343 * If we have an aggregation buffer, we must also have
13344 * a buffer to use as scratch.
13345 */
13346 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
13347 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
13348 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
13349 }
13350 }
13351 }
13352
13353 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
13354 opt[DTRACEOPT_SPECSIZE] != 0) {
13355 if (!state->dts_speculates) {
13356 /*
13357 * We're not going to create speculation buffers
13358 * because we don't have any ECBs that actually
13359 * speculate -- set the speculation size to 0.
13360 */
13361 opt[DTRACEOPT_SPECSIZE] = 0;
13362 }
13363 }
13364
13365 /*
13366 * The bare minimum size for any buffer that we're actually going to
13367 * do anything to is sizeof (uint64_t).
13368 */
13369 sz = sizeof (uint64_t);
13370
13371 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
13372 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
13373 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
13374 /*
13375 * A buffer size has been explicitly set to 0 (or to a size
13376 * that will be adjusted to 0) and we need the space -- we
13377 * need to return failure. We return ENOSPC to differentiate
13378 * it from failing to allocate a buffer due to failure to meet
13379 * the reserve (for which we return E2BIG).
13380 */
13381 rval = ENOSPC;
13382 goto out;
13383 }
13384
13385 if ((rval = dtrace_state_buffers(state)) != 0)
13386 goto err;
13387
13388 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
13389 sz = dtrace_dstate_defsize;
13390
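	/*
	 * Try to allocate the dynamic variable space at the requested size;
	 * on failure, halve the size and retry, unless the buffer resizing
	 * policy is manual.
	 */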
13391 do {
13392 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
13393
13394 if (rval == 0)
13395 break;
13396
13397 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13398 goto err;
13399 } while (sz >>= 1);
13400
13401 opt[DTRACEOPT_DYNVARSIZE] = sz;
13402
13403 if (rval != 0)
13404 goto err;
13405
13406 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
13407 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
13408
13409 if (opt[DTRACEOPT_CLEANRATE] == 0)
13410 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13411
13412 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
13413 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
13414
13415 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
13416 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13417
13418 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
13419 #if defined(sun)
13420 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
13421 hdlr.cyh_arg = state;
13422 hdlr.cyh_level = CY_LOW_LEVEL;
13423
13424 when.cyt_when = 0;
13425 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
13426
13427 state->dts_cleaner = cyclic_add(&hdlr, &when);
13428
13429 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
13430 hdlr.cyh_arg = state;
13431 hdlr.cyh_level = CY_LOW_LEVEL;
13432
13433 when.cyt_when = 0;
13434 when.cyt_interval = dtrace_deadman_interval;
13435
13436 state->dts_deadman = cyclic_add(&hdlr, &when);
13437 #else
13438 state->dts_cleaner = dtrace_state_worker_add(
13439 dtrace_state_clean, state, opt[DTRACEOPT_CLEANRATE]);
13440 state->dts_deadman = dtrace_state_worker_add(
13441 dtrace_state_deadman, state, dtrace_deadman_interval);
13442 #endif
13443
13444 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
13445
13446 /*
13447 * Now it's time to actually fire the BEGIN probe. We need to disable
13448 * interrupts here both to record the CPU on which we fired the BEGIN
13449 * probe (the data from this CPU will be processed first at user
13450 * level) and to manually activate the buffer for this CPU.
13451 */
13452 cookie = dtrace_interrupt_disable();
13453 *cpu = curcpu_id;
13454 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13455 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13456
13457 dtrace_probe(dtrace_probeid_begin,
13458 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13459 dtrace_interrupt_enable(cookie);
13460 /*
13461 * We may have had an exit action from a BEGIN probe; only change our
13462 * state to ACTIVE if we're still in WARMUP.
13463 */
13464 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13465 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13466
13467 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13468 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13469
13470 /*
	 * Regardless of whether we're now in ACTIVE or DRAINING, we
13472 * want each CPU to transition its principal buffer out of the
13473 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13474 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13475 * atomically transition from processing none of a state's ECBs to
13476 * processing all of them.
13477 */
13478 dtrace_xcall(DTRACE_CPUALL,
13479 (dtrace_xcall_t)dtrace_buffer_activate, state);
13480 goto out;
13481
13482 err:
13483 dtrace_buffer_free(state->dts_buffer);
13484 dtrace_buffer_free(state->dts_aggbuffer);
13485
13486 if ((nspec = state->dts_nspeculations) == 0) {
13487 ASSERT(state->dts_speculations == NULL);
13488 goto out;
13489 }
13490
13491 spec = state->dts_speculations;
13492 ASSERT(spec != NULL);
13493
13494 for (i = 0; i < state->dts_nspeculations; i++) {
13495 if ((buf = spec[i].dtsp_buffer) == NULL)
13496 break;
13497
13498 dtrace_buffer_free(buf);
13499 kmem_free(buf, bufsize);
13500 }
13501
13502 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13503 state->dts_nspeculations = 0;
13504 state->dts_speculations = NULL;
13505
13506 out:
13507 mutex_exit(&dtrace_lock);
13508 mutex_exit(&cpu_lock);
13509
13510 return (rval);
13511 }
13512
13513 static int
13514 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13515 {
13516 dtrace_icookie_t cookie;
13517
13518 ASSERT(MUTEX_HELD(&dtrace_lock));
13519
13520 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13521 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13522 return (EINVAL);
13523
13524 /*
13525 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13526 * to be sure that every CPU has seen it. See below for the details
13527 * on why this is done.
13528 */
13529 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13530 dtrace_sync();
13531
13532 /*
13533 * By this point, it is impossible for any CPU to be still processing
13534 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13535 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13536 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13537 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13538 * iff we're in the END probe.
13539 */
13540 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13541 dtrace_sync();
13542 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
13543
13544 /*
13545 * Finally, we can release the reserve and call the END probe. We
13546 * disable interrupts across calling the END probe to allow us to
13547 * return the CPU on which we actually called the END probe. This
13548 * allows user-land to be sure that this CPU's principal buffer is
13549 * processed last.
13550 */
13551 state->dts_reserve = 0;
13552
13553 cookie = dtrace_interrupt_disable();
13554 *cpu = curcpu_id;
13555 dtrace_probe(dtrace_probeid_end,
13556 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13557 dtrace_interrupt_enable(cookie);
13558
13559 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13560 dtrace_sync();
13561
13562 return (0);
13563 }
13564
13565 static int
13566 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13567 dtrace_optval_t val)
13568 {
13569 ASSERT(MUTEX_HELD(&dtrace_lock));
13570
13571 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13572 return (EBUSY);
13573
13574 if (option >= DTRACEOPT_MAX)
13575 return (EINVAL);
13576
13577 if (option != DTRACEOPT_CPU && val < 0)
13578 return (EINVAL);
13579
13580 switch (option) {
13581 case DTRACEOPT_DESTRUCTIVE:
13582 if (dtrace_destructive_disallow)
13583 return (EACCES);
13584
13585 state->dts_cred.dcr_destructive = 1;
13586 break;
13587
13588 case DTRACEOPT_BUFSIZE:
13589 case DTRACEOPT_DYNVARSIZE:
13590 case DTRACEOPT_AGGSIZE:
13591 case DTRACEOPT_SPECSIZE:
13592 case DTRACEOPT_STRSIZE:
13593 if (val < 0)
13594 return (EINVAL);
13595
13596 if (val >= LONG_MAX) {
13597 /*
13598 * If this is an otherwise negative value, set it to
13599 * the highest multiple of 128m less than LONG_MAX.
13600 * Technically, we're adjusting the size without
13601 * regard to the buffer resizing policy, but in fact,
13602 * this has no effect -- if we set the buffer size to
13603 * ~LONG_MAX and the buffer policy is ultimately set to
13604 * be "manual", the buffer allocation is guaranteed to
13605 * fail, if only because the allocation requires two
			 * buffers.  (We set the size to the highest
13607 * multiple of 128m because it ensures that the size
13608 * will remain a multiple of a megabyte when
13609 * repeatedly halved -- all the way down to 15m.)
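			 * On ILP32, for example, LONG_MAX - (1 << 27) + 1 is
			 * 15 * 128m = 1920m, which halves to 960m, 480m, ...,
			 * and finally 15m.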
13610 */
13611 val = LONG_MAX - (1 << 27) + 1;
13612 }
13613 }
13614
13615 state->dts_options[option] = val;
13616
13617 return (0);
13618 }
13619
13620 static void
13621 dtrace_state_destroy(dtrace_state_t *state)
13622 {
13623 dtrace_ecb_t *ecb;
13624 dtrace_vstate_t *vstate = &state->dts_vstate;
13625 #if defined(sun)
13626 minor_t minor = getminor(state->dts_dev);
13627 #endif
13628 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13629 dtrace_speculation_t *spec = state->dts_speculations;
13630 int nspec = state->dts_nspeculations;
13631 uint32_t match;
13632
13633 ASSERT(MUTEX_HELD(&dtrace_lock));
13634 ASSERT(MUTEX_HELD(&cpu_lock));
13635
13636 /*
13637 * First, retract any retained enablings for this state.
13638 */
13639 dtrace_enabling_retract(state);
13640 ASSERT(state->dts_nretained == 0);
13641
13642 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
13643 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
13644 /*
13645 * We have managed to come into dtrace_state_destroy() on a
13646 * hot enabling -- almost certainly because of a disorderly
13647 * shutdown of a consumer. (That is, a consumer that is
13648 * exiting without having called dtrace_stop().) In this case,
13649 * we're going to set our activity to be KILLED, and then
13650 * issue a sync to be sure that everyone is out of probe
13651 * context before we start blowing away ECBs.
13652 */
13653 state->dts_activity = DTRACE_ACTIVITY_KILLED;
13654 dtrace_sync();
13655 }
13656
13657 /*
13658 * Release the credential hold we took in dtrace_state_create().
13659 */
13660 if (state->dts_cred.dcr_cred != NULL) {
13661 #if defined(sun)
13662 crfree(state->dts_cred.dcr_cred);
13663 #else
13664 kauth_cred_free(state->dts_cred.dcr_cred);
13665 #endif
13666 }
13667
13668 /*
13669 * Now we can safely disable and destroy any enabled probes. Because
13670 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
13671 * (especially if they're all enabled), we take two passes through the
13672 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
13673 * in the second we disable whatever is left over.
13674 */
13675 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
13676 for (i = 0; i < state->dts_necbs; i++) {
13677 if ((ecb = state->dts_ecbs[i]) == NULL)
13678 continue;
13679
13680 if (match && ecb->dte_probe != NULL) {
13681 dtrace_probe_t *probe = ecb->dte_probe;
13682 dtrace_provider_t *prov = probe->dtpr_provider;
13683
13684 if (!(prov->dtpv_priv.dtpp_flags & match))
13685 continue;
13686 }
13687
13688 dtrace_ecb_disable(ecb);
13689 dtrace_ecb_destroy(ecb);
13690 }
13691
13692 if (!match)
13693 break;
13694 }
13695
13696 /*
13697 * Before we free the buffers, perform one more sync to assure that
13698 * every CPU is out of probe context.
13699 */
13700 dtrace_sync();
13701
13702 dtrace_buffer_free(state->dts_buffer);
13703 dtrace_buffer_free(state->dts_aggbuffer);
13704
13705 for (i = 0; i < nspec; i++)
13706 dtrace_buffer_free(spec[i].dtsp_buffer);
13707
13708 #if defined(sun)
13709 if (state->dts_cleaner != CYCLIC_NONE)
13710 cyclic_remove(state->dts_cleaner);
13711
13712 if (state->dts_deadman != CYCLIC_NONE)
13713 cyclic_remove(state->dts_deadman);
13714 #else
13715 if (state->dts_cleaner != NULL)
13716 dtrace_state_worker_remove(state->dts_cleaner);
13717
13718 if (state->dts_deadman != NULL)
13719 dtrace_state_worker_remove(state->dts_deadman);
13720 #endif
13721
13722 dtrace_dstate_fini(&vstate->dtvs_dynvars);
13723 dtrace_vstate_fini(vstate);
13724 if (state->dts_ecbs != NULL)
13725 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
13726
13727 if (state->dts_aggregations != NULL) {
13728 #ifdef DEBUG
13729 for (i = 0; i < state->dts_naggregations; i++)
13730 ASSERT(state->dts_aggregations[i] == NULL);
13731 #endif
13732 ASSERT(state->dts_naggregations > 0);
13733 kmem_free(state->dts_aggregations,
13734 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
13735 }
13736
13737 kmem_free(state->dts_buffer, bufsize);
13738 kmem_free(state->dts_aggbuffer, bufsize);
13739
13740 for (i = 0; i < nspec; i++)
13741 kmem_free(spec[i].dtsp_buffer, bufsize);
13742
13743 if (spec != NULL)
13744 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13745
13746 dtrace_format_destroy(state);
13747
13748 if (state->dts_aggid_arena != NULL) {
13749 vmem_destroy(state->dts_aggid_arena);
13750 state->dts_aggid_arena = NULL;
13751 }
13752 #if defined(sun)
13753 ddi_soft_state_free(dtrace_softstate, minor);
13754 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13755 #else
13756 kmem_free(state, sizeof(dtrace_state_t));
13757 #endif
13758 }
13759
13760 /*
13761 * DTrace Anonymous Enabling Functions
13762 */
13763 static dtrace_state_t *
13764 dtrace_anon_grab(void)
13765 {
13766 dtrace_state_t *state;
13767
13768 ASSERT(MUTEX_HELD(&dtrace_lock));
13769
13770 if ((state = dtrace_anon.dta_state) == NULL) {
13771 ASSERT(dtrace_anon.dta_enabling == NULL);
13772 return (NULL);
13773 }
13774
13775 ASSERT(dtrace_anon.dta_enabling != NULL);
13776 ASSERT(dtrace_retained != NULL);
13777
13778 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
13779 dtrace_anon.dta_enabling = NULL;
13780 dtrace_anon.dta_state = NULL;
13781
13782 return (state);
13783 }
13784
13785 static void
13786 dtrace_anon_property(void)
13787 {
13788 int i, rv;
13789 dtrace_state_t *state;
13790 dof_hdr_t *dof;
13791 char c[32]; /* enough for "dof-data-" + digits */
13792
13793 ASSERT(MUTEX_HELD(&dtrace_lock));
13794 ASSERT(MUTEX_HELD(&cpu_lock));
13795
13796 for (i = 0; ; i++) {
13797 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
13798
13799 dtrace_err_verbose = 1;
13800
13801 if ((dof = dtrace_dof_property(c)) == NULL) {
13802 dtrace_err_verbose = 0;
13803 break;
13804 }
13805
13806 #if defined(sun)
13807 /*
13808 * We want to create anonymous state, so we need to transition
13809 * the kernel debugger to indicate that DTrace is active. If
13810 * this fails (e.g. because the debugger has modified text in
13811 * some way), we won't continue with the processing.
13812 */
13813 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
13814 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
13815 "enabling ignored.");
13816 dtrace_dof_destroy(dof);
13817 break;
13818 }
13819 #endif
13820
13821 /*
13822 * If we haven't allocated an anonymous state, we'll do so now.
13823 */
13824 if ((state = dtrace_anon.dta_state) == NULL) {
13825 #if defined(sun)
13826 state = dtrace_state_create(NULL, NULL);
13827 #endif
13828 dtrace_anon.dta_state = state;
13829
13830 if (state == NULL) {
13831 /*
13832 * This basically shouldn't happen: the only
13833 * failure mode from dtrace_state_create() is a
13834 * failure of ddi_soft_state_zalloc() that
13835 * itself should never happen. Still, the
13836 * interface allows for a failure mode, and
13837 * we want to fail as gracefully as possible:
13838 * we'll emit an error message and cease
13839 * processing anonymous state in this case.
13840 */
13841 cmn_err(CE_WARN, "failed to create "
13842 "anonymous state");
13843 dtrace_dof_destroy(dof);
13844 break;
13845 }
13846 }
13847
13848 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
13849 &dtrace_anon.dta_enabling, 0, B_TRUE);
13850
13851 if (rv == 0)
13852 rv = dtrace_dof_options(dof, state);
13853
13854 dtrace_err_verbose = 0;
13855 dtrace_dof_destroy(dof);
13856
13857 if (rv != 0) {
13858 /*
13859 * This is malformed DOF; chuck any anonymous state
13860 * that we created.
13861 */
13862 ASSERT(dtrace_anon.dta_enabling == NULL);
13863 dtrace_state_destroy(state);
13864 dtrace_anon.dta_state = NULL;
13865 break;
13866 }
13867
13868 ASSERT(dtrace_anon.dta_enabling != NULL);
13869 }
13870
13871 if (dtrace_anon.dta_enabling != NULL) {
13872 int rval;
13873
13874 /*
13875 * dtrace_enabling_retain() can only fail because we are
13876 * trying to retain more enablings than are allowed -- but
13877 * we only have one anonymous enabling, and we are guaranteed
13878 * to be allowed at least one retained enabling; we assert
13879 * that dtrace_enabling_retain() returns success.
13880 */
13881 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
13882 ASSERT(rval == 0);
13883
13884 dtrace_enabling_dump(dtrace_anon.dta_enabling);
13885 }
13886 }
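
/*
 * For illustration (this is the userland side, and not defined here):
 * anonymous enablings are conventionally created with dtrace(1M)'s -A
 * option, which compiles the given D program to DOF and stores it in the
 * dtrace driver's configuration as successive "dof-data-0", "dof-data-1",
 * ... properties -- precisely the names that dtrace_anon_property() probes
 * for above, e.g.:
 *
 *	# dtrace -A -n 'io:::start { @starts[execname] = count(); }'
 */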
13887
13888 /*
13889 * DTrace Helper Functions
13890 */
13891 static void
13892 dtrace_helper_trace(dtrace_helper_action_t *helper,
13893 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
13894 {
13895 uint32_t size, next, nnext, i;
13896 dtrace_helptrace_t *ent;
13897 uint16_t flags = cpu_core[curcpu_id].cpuc_dtrace_flags;
13898
13899 if (!dtrace_helptrace_enabled)
13900 return;
13901
13902 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
13903
13904 /*
13905 * What would a tracing framework be without its own tracing
13906 * framework? (Well, a hell of a lot simpler, for starters...)
13907 */
13908 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
13909 sizeof (uint64_t) - sizeof (uint64_t);
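
	/*
	 * (The trailing subtraction accounts for dtht_locals[]: assuming the
	 * structure ends in a one-element array of uint64_t, the sizeof
	 * already includes one slot.  A worked example: with
	 * dtrace_helptrace_nlocals == 4, each record is the base structure
	 * plus room for three more locals.)
	 */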
13910
13911 /*
13912 * Iterate until we can allocate a slot in the trace buffer.
13913 */
13914 do {
13915 next = dtrace_helptrace_next;
13916
13917 if (next + size < dtrace_helptrace_bufsize) {
13918 nnext = next + size;
13919 } else {
13920 nnext = size;
13921 }
13922 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
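
	/*
	 * (A sketch of the invariant here: dtrace_helptrace_next only
	 * advances via the compare-and-swap above, so concurrent tracers
	 * each claim a disjoint [next, nnext) slice of the buffer.  When a
	 * record would run off the end, nnext becomes 'size' and the winner
	 * rewinds to offset 0 below -- the buffer is a lock-free ring that
	 * overwrites its oldest entries.)
	 */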
13923
13924 /*
13925 * We have our slot; fill it in.
13926 */
13927 if (nnext == size)
13928 next = 0;
13929
13930 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
13931 ent->dtht_helper = helper;
13932 ent->dtht_where = where;
13933 ent->dtht_nlocals = vstate->dtvs_nlocals;
13934
13935 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
13936 mstate->dtms_fltoffs : -1;
13937 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
13938 ent->dtht_illval = cpu_core[curcpu_id].cpuc_dtrace_illval;
13939
13940 for (i = 0; i < vstate->dtvs_nlocals; i++) {
13941 dtrace_statvar_t *svar;
13942
13943 if ((svar = vstate->dtvs_locals[i]) == NULL)
13944 continue;
13945
13946 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
13947 ent->dtht_locals[i] =
13948 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu_id];
13949 }
13950 }
13951
13952 static uint64_t
13953 dtrace_helper(int which, dtrace_mstate_t *mstate,
13954 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
13955 {
13956 uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags;
13957 uint64_t sarg0 = mstate->dtms_arg[0];
13958 uint64_t sarg1 = mstate->dtms_arg[1];
13959 uint64_t rval = 0;
13960 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
13961 dtrace_helper_action_t *helper;
13962 dtrace_vstate_t *vstate;
13963 dtrace_difo_t *pred;
13964 int i, trace = dtrace_helptrace_enabled;
13965
13966 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
13967
13968 if (helpers == NULL)
13969 return (0);
13970
13971 if ((helper = helpers->dthps_actions[which]) == NULL)
13972 return (0);
13973
13974 vstate = &helpers->dthps_vstate;
13975 mstate->dtms_arg[0] = arg0;
13976 mstate->dtms_arg[1] = arg1;
13977
13978 /*
13979 * Now iterate over each helper. If its predicate evaluates to 'true',
13980 * we'll call the corresponding actions. Note that the below calls
13981 * to dtrace_dif_emulate() may set faults in machine state. This is
13982 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
13983 * the stored DIF offset with its own (which is the desired behavior).
13984 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
13985 * from machine state; this is okay, too.
13986 */
13987 for (; helper != NULL; helper = helper->dtha_next) {
13988 if ((pred = helper->dtha_predicate) != NULL) {
13989 if (trace)
13990 dtrace_helper_trace(helper, mstate, vstate, 0);
13991
13992 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
13993 goto next;
13994
13995 if (*flags & CPU_DTRACE_FAULT)
13996 goto err;
13997 }
13998
13999 for (i = 0; i < helper->dtha_nactions; i++) {
14000 if (trace)
14001 dtrace_helper_trace(helper,
14002 mstate, vstate, i + 1);
14003
14004 rval = dtrace_dif_emulate(helper->dtha_actions[i],
14005 mstate, vstate, state);
14006
14007 if (*flags & CPU_DTRACE_FAULT)
14008 goto err;
14009 }
14010
14011 next:
14012 if (trace)
14013 dtrace_helper_trace(helper, mstate, vstate,
14014 DTRACE_HELPTRACE_NEXT);
14015 }
14016
14017 if (trace)
14018 dtrace_helper_trace(helper, mstate, vstate,
14019 DTRACE_HELPTRACE_DONE);
14020
14021 /*
14022 * Restore the arg0 that we saved upon entry.
14023 */
14024 mstate->dtms_arg[0] = sarg0;
14025 mstate->dtms_arg[1] = sarg1;
14026
14027 return (rval);
14028
14029 err:
14030 if (trace)
14031 dtrace_helper_trace(helper, mstate, vstate,
14032 DTRACE_HELPTRACE_ERR);
14033
14034 /*
14035 * Restore the arg0 that we saved upon entry.
14036 */
14037 mstate->dtms_arg[0] = sarg0;
14038 mstate->dtms_arg[1] = sarg1;
14039
14040 return (0);
14041 }
14042
14043 #if defined(sun)
14044 static void
14045 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
14046 dtrace_vstate_t *vstate)
14047 {
14048 int i;
14049
14050 if (helper->dtha_predicate != NULL)
14051 dtrace_difo_release(helper->dtha_predicate, vstate);
14052
14053 for (i = 0; i < helper->dtha_nactions; i++) {
14054 ASSERT(helper->dtha_actions[i] != NULL);
14055 dtrace_difo_release(helper->dtha_actions[i], vstate);
14056 }
14057
14058 kmem_free(helper->dtha_actions,
14059 helper->dtha_nactions * sizeof (dtrace_difo_t *));
14060 kmem_free(helper, sizeof (dtrace_helper_action_t));
14061 }
14062
14063 static int
14064 dtrace_helper_destroygen(int gen)
14065 {
14066 proc_t *p = curproc;
14067 dtrace_helpers_t *help = p->p_dtrace_helpers;
14068 dtrace_vstate_t *vstate;
14069 int i;
14070
14071 ASSERT(MUTEX_HELD(&dtrace_lock));
14072
14073 if (help == NULL || gen > help->dthps_generation)
14074 return (EINVAL);
14075
14076 vstate = &help->dthps_vstate;
14077
14078 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14079 dtrace_helper_action_t *last = NULL, *h, *next;
14080
14081 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14082 next = h->dtha_next;
14083
14084 if (h->dtha_generation == gen) {
14085 if (last != NULL) {
14086 last->dtha_next = next;
14087 } else {
14088 help->dthps_actions[i] = next;
14089 }
14090
14091 dtrace_helper_action_destroy(h, vstate);
14092 } else {
14093 last = h;
14094 }
14095 }
14096 }
14097
14098 /*
	 * Iterate until we've cleared out all helper providers with the
14100 * given generation number.
14101 */
14102 for (;;) {
14103 dtrace_helper_provider_t *prov;
14104
14105 /*
14106 * Look for a helper provider with the right generation. We
14107 * have to start back at the beginning of the list each time
14108 * because we drop dtrace_lock. It's unlikely that we'll make
14109 * more than two passes.
14110 */
14111 for (i = 0; i < help->dthps_nprovs; i++) {
14112 prov = help->dthps_provs[i];
14113
14114 if (prov->dthp_generation == gen)
14115 break;
14116 }
14117
14118 /*
14119 * If there were no matches, we're done.
14120 */
14121 if (i == help->dthps_nprovs)
14122 break;
14123
14124 /*
14125 * Move the last helper provider into this slot.
14126 */
14127 help->dthps_nprovs--;
14128 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14129 help->dthps_provs[help->dthps_nprovs] = NULL;
14130
14131 mutex_exit(&dtrace_lock);
14132
14133 /*
14134 * If we have a meta provider, remove this helper provider.
14135 */
14136 mutex_enter(&dtrace_meta_lock);
14137 if (dtrace_meta_pid != NULL) {
14138 ASSERT(dtrace_deferred_pid == NULL);
14139 dtrace_helper_provider_remove(&prov->dthp_prov,
14140 p->p_pid);
14141 }
14142 mutex_exit(&dtrace_meta_lock);
14143
14144 dtrace_helper_provider_destroy(prov);
14145
14146 mutex_enter(&dtrace_lock);
14147 }
14148
14149 return (0);
14150 }
14151
14152 static int
14153 dtrace_helper_validate(dtrace_helper_action_t *helper)
14154 {
14155 int err = 0, i;
14156 dtrace_difo_t *dp;
14157
14158 if ((dp = helper->dtha_predicate) != NULL)
14159 err += dtrace_difo_validate_helper(dp);
14160
14161 for (i = 0; i < helper->dtha_nactions; i++)
14162 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14163
14164 return (err == 0);
14165 }
14166
14167 static int
14168 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14169 {
14170 dtrace_helpers_t *help;
14171 dtrace_helper_action_t *helper, *last;
14172 dtrace_actdesc_t *act;
14173 dtrace_vstate_t *vstate;
14174 dtrace_predicate_t *pred;
14175 int count = 0, nactions = 0, i;
14176
14177 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14178 return (EINVAL);
14179
14180 help = curproc->p_dtrace_helpers;
14181 last = help->dthps_actions[which];
14182 vstate = &help->dthps_vstate;
14183
14184 for (count = 0; last != NULL; last = last->dtha_next) {
14185 count++;
14186 if (last->dtha_next == NULL)
14187 break;
14188 }
14189
14190 /*
14191 * If we already have dtrace_helper_actions_max helper actions for this
14192 * helper action type, we'll refuse to add a new one.
14193 */
14194 if (count >= dtrace_helper_actions_max)
14195 return (ENOSPC);
14196
14197 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14198 helper->dtha_generation = help->dthps_generation;
14199
14200 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14201 ASSERT(pred->dtp_difo != NULL);
14202 dtrace_difo_hold(pred->dtp_difo);
14203 helper->dtha_predicate = pred->dtp_difo;
14204 }
14205
14206 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14207 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14208 goto err;
14209
14210 if (act->dtad_difo == NULL)
14211 goto err;
14212
14213 nactions++;
14214 }
14215
14216 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14217 (helper->dtha_nactions = nactions), KM_SLEEP);
14218
14219 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14220 dtrace_difo_hold(act->dtad_difo);
14221 helper->dtha_actions[i++] = act->dtad_difo;
14222 }
14223
14224 if (!dtrace_helper_validate(helper))
14225 goto err;
14226
14227 if (last == NULL) {
14228 help->dthps_actions[which] = helper;
14229 } else {
14230 last->dtha_next = helper;
14231 }
14232
14233 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14234 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14235 dtrace_helptrace_next = 0;
14236 }
14237
14238 return (0);
14239 err:
14240 dtrace_helper_action_destroy(helper, vstate);
14241 return (EINVAL);
14242 }
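
/*
 * (Two safety properties are enforced above: every helper action must be a
 * plain DIF expression -- DTRACEACT_DIFEXPR -- and each DIFO must pass
 * dtrace_difo_validate_helper(), which limits helpers to the subset of DIF
 * deemed safe to run from arbitrary probe context.)
 */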
14243
14244 static void
14245 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14246 dof_helper_t *dofhp)
14247 {
14248 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14249
14250 mutex_enter(&dtrace_meta_lock);
14251 mutex_enter(&dtrace_lock);
14252
14253 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14254 /*
14255 * If the dtrace module is loaded but not attached, or if
		 * there isn't a meta provider registered to deal with
14257 * these provider descriptions, we need to postpone creating
14258 * the actual providers until later.
14259 */
14260
14261 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14262 dtrace_deferred_pid != help) {
14263 help->dthps_deferred = 1;
14264 help->dthps_pid = p->p_pid;
14265 help->dthps_next = dtrace_deferred_pid;
14266 help->dthps_prev = NULL;
14267 if (dtrace_deferred_pid != NULL)
14268 dtrace_deferred_pid->dthps_prev = help;
14269 dtrace_deferred_pid = help;
14270 }
14271
14272 mutex_exit(&dtrace_lock);
14273
14274 } else if (dofhp != NULL) {
14275 /*
14276 * If the dtrace module is loaded and we have a particular
14277 * helper provider description, pass that off to the
14278 * meta provider.
14279 */
14280
14281 mutex_exit(&dtrace_lock);
14282
14283 dtrace_helper_provide(dofhp, p->p_pid);
14284
14285 } else {
14286 /*
14287 * Otherwise, just pass all the helper provider descriptions
14288 * off to the meta provider.
14289 */
14290
14291 int i;
14292 mutex_exit(&dtrace_lock);
14293
14294 for (i = 0; i < help->dthps_nprovs; i++) {
14295 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14296 p->p_pid);
14297 }
14298 }
14299
14300 mutex_exit(&dtrace_meta_lock);
14301 }
14302
14303 static int
14304 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14305 {
14306 dtrace_helpers_t *help;
14307 dtrace_helper_provider_t *hprov, **tmp_provs;
14308 uint_t tmp_maxprovs, i;
14309
14310 ASSERT(MUTEX_HELD(&dtrace_lock));
14311
14312 help = curproc->p_dtrace_helpers;
14313 ASSERT(help != NULL);
14314
14315 /*
14316 * If we already have dtrace_helper_providers_max helper providers,
	 * we'll refuse to add a new one.
14318 */
14319 if (help->dthps_nprovs >= dtrace_helper_providers_max)
14320 return (ENOSPC);
14321
14322 /*
14323 * Check to make sure this isn't a duplicate.
14324 */
14325 for (i = 0; i < help->dthps_nprovs; i++) {
14326 if (dofhp->dofhp_addr ==
14327 help->dthps_provs[i]->dthp_prov.dofhp_addr)
14328 return (EALREADY);
14329 }
14330
14331 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
14332 hprov->dthp_prov = *dofhp;
14333 hprov->dthp_ref = 1;
14334 hprov->dthp_generation = gen;
14335
14336 /*
14337 * Allocate a bigger table for helper providers if it's already full.
14338 */
14339 if (help->dthps_maxprovs == help->dthps_nprovs) {
14340 tmp_maxprovs = help->dthps_maxprovs;
14341 tmp_provs = help->dthps_provs;
14342
14343 if (help->dthps_maxprovs == 0)
14344 help->dthps_maxprovs = 2;
14345 else
14346 help->dthps_maxprovs *= 2;
14347 if (help->dthps_maxprovs > dtrace_helper_providers_max)
14348 help->dthps_maxprovs = dtrace_helper_providers_max;
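
		/*
		 * (That is, the table grows 2, 4, 8, ... entries on demand,
		 * capped at dtrace_helper_providers_max.)
		 */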
14349
14350 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
14351
14352 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
14353 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14354
14355 if (tmp_provs != NULL) {
14356 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
14357 sizeof (dtrace_helper_provider_t *));
14358 kmem_free(tmp_provs, tmp_maxprovs *
14359 sizeof (dtrace_helper_provider_t *));
14360 }
14361 }
14362
14363 help->dthps_provs[help->dthps_nprovs] = hprov;
14364 help->dthps_nprovs++;
14365
14366 return (0);
14367 }
14368
14369 static void
14370 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
14371 {
14372 mutex_enter(&dtrace_lock);
14373
14374 if (--hprov->dthp_ref == 0) {
14375 dof_hdr_t *dof;
14376 mutex_exit(&dtrace_lock);
14377 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
14378 dtrace_dof_destroy(dof);
14379 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
14380 } else {
14381 mutex_exit(&dtrace_lock);
14382 }
14383 }
14384
14385 static int
14386 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
14387 {
14388 uintptr_t daddr = (uintptr_t)dof;
14389 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
14390 dof_provider_t *provider;
14391 dof_probe_t *probe;
14392 uint8_t *arg;
14393 char *strtab, *typestr;
14394 dof_stridx_t typeidx;
14395 size_t typesz;
14396 uint_t nprobes, j, k;
14397
14398 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
14399
14400 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
14401 dtrace_dof_error(dof, "misaligned section offset");
14402 return (-1);
14403 }
14404
14405 /*
14406 * The section needs to be large enough to contain the DOF provider
14407 * structure appropriate for the given version.
14408 */
14409 if (sec->dofs_size <
14410 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
14411 offsetof(dof_provider_t, dofpv_prenoffs) :
14412 sizeof (dof_provider_t))) {
14413 dtrace_dof_error(dof, "provider section too small");
14414 return (-1);
14415 }
14416
14417 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
14418 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
14419 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
14420 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
14421 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
14422
14423 if (str_sec == NULL || prb_sec == NULL ||
14424 arg_sec == NULL || off_sec == NULL)
14425 return (-1);
14426
14427 enoff_sec = NULL;
14428
14429 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14430 provider->dofpv_prenoffs != DOF_SECT_NONE &&
14431 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
14432 provider->dofpv_prenoffs)) == NULL)
14433 return (-1);
14434
14435 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
14436
14437 if (provider->dofpv_name >= str_sec->dofs_size ||
14438 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
14439 dtrace_dof_error(dof, "invalid provider name");
14440 return (-1);
14441 }
14442
14443 if (prb_sec->dofs_entsize == 0 ||
14444 prb_sec->dofs_entsize > prb_sec->dofs_size) {
14445 dtrace_dof_error(dof, "invalid entry size");
14446 return (-1);
14447 }
14448
14449 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
14450 dtrace_dof_error(dof, "misaligned entry size");
14451 return (-1);
14452 }
14453
14454 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
14455 dtrace_dof_error(dof, "invalid entry size");
14456 return (-1);
14457 }
14458
14459 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
14460 dtrace_dof_error(dof, "misaligned section offset");
14461 return (-1);
14462 }
14463
14464 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
14465 dtrace_dof_error(dof, "invalid entry size");
14466 return (-1);
14467 }
14468
14469 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
14470
14471 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
14472
14473 /*
14474 * Take a pass through the probes to check for errors.
14475 */
14476 for (j = 0; j < nprobes; j++) {
14477 probe = (dof_probe_t *)(uintptr_t)(daddr +
14478 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
14479
14480 if (probe->dofpr_func >= str_sec->dofs_size) {
14481 dtrace_dof_error(dof, "invalid function name");
14482 return (-1);
14483 }
14484
14485 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
14486 dtrace_dof_error(dof, "function name too long");
14487 return (-1);
14488 }
14489
14490 if (probe->dofpr_name >= str_sec->dofs_size ||
14491 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
14492 dtrace_dof_error(dof, "invalid probe name");
14493 return (-1);
14494 }
14495
		/*
		 * The offset count must not wrap the index (e.g., a 32-bit
		 * dofpr_offidx of 0xfffffffe plus a dofpr_noffs of 4 wraps
		 * around to 2), and the offsets must also not overflow the
		 * section's data.
		 */
14500 if (probe->dofpr_offidx + probe->dofpr_noffs <
14501 probe->dofpr_offidx ||
14502 (probe->dofpr_offidx + probe->dofpr_noffs) *
14503 off_sec->dofs_entsize > off_sec->dofs_size) {
14504 dtrace_dof_error(dof, "invalid probe offset");
14505 return (-1);
14506 }
14507
14508 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
14509 /*
14510 * If there's no is-enabled offset section, make sure
14511 * there aren't any is-enabled offsets. Otherwise
14512 * perform the same checks as for probe offsets
14513 * (immediately above).
14514 */
14515 if (enoff_sec == NULL) {
14516 if (probe->dofpr_enoffidx != 0 ||
14517 probe->dofpr_nenoffs != 0) {
14518 dtrace_dof_error(dof, "is-enabled "
14519 "offsets with null section");
14520 return (-1);
14521 }
14522 } else if (probe->dofpr_enoffidx +
14523 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
14524 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
14525 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
14526 dtrace_dof_error(dof, "invalid is-enabled "
14527 "offset");
14528 return (-1);
14529 }
14530
14531 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
14532 dtrace_dof_error(dof, "zero probe and "
14533 "is-enabled offsets");
14534 return (-1);
14535 }
14536 } else if (probe->dofpr_noffs == 0) {
14537 dtrace_dof_error(dof, "zero probe offsets");
14538 return (-1);
14539 }
14540
14541 if (probe->dofpr_argidx + probe->dofpr_xargc <
14542 probe->dofpr_argidx ||
14543 (probe->dofpr_argidx + probe->dofpr_xargc) *
14544 arg_sec->dofs_entsize > arg_sec->dofs_size) {
14545 dtrace_dof_error(dof, "invalid args");
14546 return (-1);
14547 }
14548
14549 typeidx = probe->dofpr_nargv;
14550 typestr = strtab + probe->dofpr_nargv;
14551 for (k = 0; k < probe->dofpr_nargc; k++) {
14552 if (typeidx >= str_sec->dofs_size) {
14553 dtrace_dof_error(dof, "bad "
14554 "native argument type");
14555 return (-1);
14556 }
14557
14558 typesz = strlen(typestr) + 1;
14559 if (typesz > DTRACE_ARGTYPELEN) {
14560 dtrace_dof_error(dof, "native "
14561 "argument type too long");
14562 return (-1);
14563 }
14564 typeidx += typesz;
14565 typestr += typesz;
14566 }
14567
14568 typeidx = probe->dofpr_xargv;
14569 typestr = strtab + probe->dofpr_xargv;
14570 for (k = 0; k < probe->dofpr_xargc; k++) {
14571 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
14572 dtrace_dof_error(dof, "bad "
14573 "native argument index");
14574 return (-1);
14575 }
14576
14577 if (typeidx >= str_sec->dofs_size) {
14578 dtrace_dof_error(dof, "bad "
14579 "translated argument type");
14580 return (-1);
14581 }
14582
14583 typesz = strlen(typestr) + 1;
14584 if (typesz > DTRACE_ARGTYPELEN) {
14585 dtrace_dof_error(dof, "translated argument "
14586 "type too long");
14587 return (-1);
14588 }
14589
14590 typeidx += typesz;
14591 typestr += typesz;
14592 }
14593 }
14594
14595 return (0);
14596 }
14597
14598 static int
14599 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
14600 {
14601 dtrace_helpers_t *help;
14602 dtrace_vstate_t *vstate;
14603 dtrace_enabling_t *enab = NULL;
14604 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
14605 uintptr_t daddr = (uintptr_t)dof;
14606
14607 ASSERT(MUTEX_HELD(&dtrace_lock));
14608
14609 if ((help = curproc->p_dtrace_helpers) == NULL)
14610 help = dtrace_helpers_create(curproc);
14611
14612 vstate = &help->dthps_vstate;
14613
14614 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
14615 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
14616 dtrace_dof_destroy(dof);
14617 return (rv);
14618 }
14619
14620 /*
14621 * Look for helper providers and validate their descriptions.
14622 */
14623 if (dhp != NULL) {
14624 for (i = 0; i < dof->dofh_secnum; i++) {
14625 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14626 dof->dofh_secoff + i * dof->dofh_secsize);
14627
14628 if (sec->dofs_type != DOF_SECT_PROVIDER)
14629 continue;
14630
14631 if (dtrace_helper_provider_validate(dof, sec) != 0) {
14632 dtrace_enabling_destroy(enab);
14633 dtrace_dof_destroy(dof);
14634 return (-1);
14635 }
14636
14637 nprovs++;
14638 }
14639 }
14640
14641 /*
14642 * Now we need to walk through the ECB descriptions in the enabling.
14643 */
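	/*
	 * (Only dtrace:helper:ustack: descriptions are accepted below; in D,
	 * such a helper is conventionally written as a clause of the form
	 *
	 *	dtrace:helper:ustack:
	 *	{
	 *		...
	 *	}
	 *
	 * -- a sketch only; that probe description is what the strcmp()
	 * checks that follow are matching.)
	 */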
14644 for (i = 0; i < enab->dten_ndesc; i++) {
14645 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14646 dtrace_probedesc_t *desc = &ep->dted_probe;
14647
14648 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14649 continue;
14650
14651 if (strcmp(desc->dtpd_mod, "helper") != 0)
14652 continue;
14653
14654 if (strcmp(desc->dtpd_func, "ustack") != 0)
14655 continue;
14656
14657 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14658 ep)) != 0) {
14659 /*
14660 * Adding this helper action failed -- we are now going
14661 * to rip out the entire generation and return failure.
14662 */
14663 (void) dtrace_helper_destroygen(help->dthps_generation);
14664 dtrace_enabling_destroy(enab);
14665 dtrace_dof_destroy(dof);
14666 return (-1);
14667 }
14668
14669 nhelpers++;
14670 }
14671
14672 if (nhelpers < enab->dten_ndesc)
14673 dtrace_dof_error(dof, "unmatched helpers");
14674
14675 gen = help->dthps_generation++;
14676 dtrace_enabling_destroy(enab);
14677
14678 if (dhp != NULL && nprovs > 0) {
14679 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14680 if (dtrace_helper_provider_add(dhp, gen) == 0) {
14681 mutex_exit(&dtrace_lock);
14682 dtrace_helper_provider_register(curproc, help, dhp);
14683 mutex_enter(&dtrace_lock);
14684
14685 destroy = 0;
14686 }
14687 }
14688
14689 if (destroy)
14690 dtrace_dof_destroy(dof);
14691
14692 return (gen);
14693 }
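
/*
 * (The generation number returned above is handed back to userland via the
 * DTRACEHIOC_ADD handling in dtrace_ioctl_helper() below; a later
 * DTRACEHIOC_REMOVE with that generation funnels into
 * dtrace_helper_destroygen() to tear down everything this call added.)
 */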
14694
14695 static dtrace_helpers_t *
14696 dtrace_helpers_create(proc_t *p)
14697 {
14698 dtrace_helpers_t *help;
14699
14700 ASSERT(MUTEX_HELD(&dtrace_lock));
14701 ASSERT(p->p_dtrace_helpers == NULL);
14702
14703 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14704 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14705 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14706
14707 p->p_dtrace_helpers = help;
14708 dtrace_helpers++;
14709
14710 return (help);
14711 }
14712
14713 static void
14714 dtrace_helpers_destroy(void)
14715 {
14716 dtrace_helpers_t *help;
14717 dtrace_vstate_t *vstate;
14718 proc_t *p = curproc;
14719 int i;
14720
14721 mutex_enter(&dtrace_lock);
14722
14723 ASSERT(p->p_dtrace_helpers != NULL);
14724 ASSERT(dtrace_helpers > 0);
14725
14726 help = p->p_dtrace_helpers;
14727 vstate = &help->dthps_vstate;
14728
14729 /*
14730 * We're now going to lose the help from this process.
14731 */
14732 p->p_dtrace_helpers = NULL;
14733 dtrace_sync();
14734
14735 /*
	 * Destroy the helper actions.
14737 */
14738 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14739 dtrace_helper_action_t *h, *next;
14740
		for (h = help->dthps_actions[i]; h != NULL; h = next) {
			next = h->dtha_next;
			dtrace_helper_action_destroy(h, vstate);
		}
14746 }
14747
14748 mutex_exit(&dtrace_lock);
14749
14750 /*
14751 * Destroy the helper providers.
14752 */
14753 if (help->dthps_maxprovs > 0) {
14754 mutex_enter(&dtrace_meta_lock);
14755 if (dtrace_meta_pid != NULL) {
14756 ASSERT(dtrace_deferred_pid == NULL);
14757
14758 for (i = 0; i < help->dthps_nprovs; i++) {
14759 dtrace_helper_provider_remove(
14760 &help->dthps_provs[i]->dthp_prov, p->p_pid);
14761 }
14762 } else {
14763 mutex_enter(&dtrace_lock);
14764 ASSERT(help->dthps_deferred == 0 ||
14765 help->dthps_next != NULL ||
14766 help->dthps_prev != NULL ||
14767 help == dtrace_deferred_pid);
14768
14769 /*
14770 * Remove the helper from the deferred list.
14771 */
14772 if (help->dthps_next != NULL)
14773 help->dthps_next->dthps_prev = help->dthps_prev;
14774 if (help->dthps_prev != NULL)
14775 help->dthps_prev->dthps_next = help->dthps_next;
14776 if (dtrace_deferred_pid == help) {
14777 dtrace_deferred_pid = help->dthps_next;
14778 ASSERT(help->dthps_prev == NULL);
14779 }
14780
14781 mutex_exit(&dtrace_lock);
14782 }
14783
14784 mutex_exit(&dtrace_meta_lock);
14785
14786 for (i = 0; i < help->dthps_nprovs; i++) {
14787 dtrace_helper_provider_destroy(help->dthps_provs[i]);
14788 }
14789
14790 kmem_free(help->dthps_provs, help->dthps_maxprovs *
14791 sizeof (dtrace_helper_provider_t *));
14792 }
14793
14794 mutex_enter(&dtrace_lock);
14795
14796 dtrace_vstate_fini(&help->dthps_vstate);
14797 kmem_free(help->dthps_actions,
14798 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
14799 kmem_free(help, sizeof (dtrace_helpers_t));
14800
14801 --dtrace_helpers;
14802 mutex_exit(&dtrace_lock);
14803 }
14804
14805 static void
14806 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
14807 {
14808 dtrace_helpers_t *help, *newhelp;
14809 dtrace_helper_action_t *helper, *new, *last;
14810 dtrace_difo_t *dp;
14811 dtrace_vstate_t *vstate;
14812 int i, j, sz, hasprovs = 0;
14813
14814 mutex_enter(&dtrace_lock);
14815 ASSERT(from->p_dtrace_helpers != NULL);
14816 ASSERT(dtrace_helpers > 0);
14817
14818 help = from->p_dtrace_helpers;
14819 newhelp = dtrace_helpers_create(to);
14820 ASSERT(to->p_dtrace_helpers != NULL);
14821
14822 newhelp->dthps_generation = help->dthps_generation;
14823 vstate = &newhelp->dthps_vstate;
14824
14825 /*
14826 * Duplicate the helper actions.
14827 */
14828 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14829 if ((helper = help->dthps_actions[i]) == NULL)
14830 continue;
14831
14832 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
14833 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
14834 KM_SLEEP);
14835 new->dtha_generation = helper->dtha_generation;
14836
14837 if ((dp = helper->dtha_predicate) != NULL) {
14838 dp = dtrace_difo_duplicate(dp, vstate);
14839 new->dtha_predicate = dp;
14840 }
14841
14842 new->dtha_nactions = helper->dtha_nactions;
14843 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
14844 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
14845
14846 for (j = 0; j < new->dtha_nactions; j++) {
14847 dp = helper->dtha_actions[j];
14848
14849 ASSERT(dp != NULL);
14850 dp = dtrace_difo_duplicate(dp, vstate);
14851 new->dtha_actions[j] = dp;
14852 }
14853
14854 if (last != NULL) {
14855 last->dtha_next = new;
14856 } else {
14857 newhelp->dthps_actions[i] = new;
14858 }
14859
14860 last = new;
14861 }
14862 }
14863
14864 /*
14865 * Duplicate the helper providers and register them with the
14866 * DTrace framework.
14867 */
14868 if (help->dthps_nprovs > 0) {
14869 newhelp->dthps_nprovs = help->dthps_nprovs;
14870 newhelp->dthps_maxprovs = help->dthps_nprovs;
14871 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14872 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14873 for (i = 0; i < newhelp->dthps_nprovs; i++) {
14874 newhelp->dthps_provs[i] = help->dthps_provs[i];
14875 newhelp->dthps_provs[i]->dthp_ref++;
14876 }
14877
14878 hasprovs = 1;
14879 }
14880
14881 mutex_exit(&dtrace_lock);
14882
14883 if (hasprovs)
14884 dtrace_helper_provider_register(to, newhelp, NULL);
14885 }
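
/*
 * (dtrace_helpers_duplicate() is the fork path -- it is installed as the
 * dtrace_helpers_fork hook in dtrace_attach() below.  Helper actions are
 * deep-copied via dtrace_difo_duplicate(), while helper providers are
 * shared between parent and child by bumping dthp_ref.)
 */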
14886
14887 /*
14888 * DTrace Hook Functions
14889 */
14890 static void
14891 dtrace_module_loaded(modctl_t *ctl)
14892 {
14893 dtrace_provider_t *prv;
14894
14895 mutex_enter(&dtrace_provider_lock);
14896 mutex_enter(&mod_lock);
14897
14898 ASSERT(ctl->mod_busy);
14899
14900 /*
	 * We're going to call each provider's per-module provide operation,
14902 * specifying only this module.
14903 */
14904 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14905 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14906
14907 mutex_exit(&mod_lock);
14908 mutex_exit(&dtrace_provider_lock);
14909
14910 /*
14911 * If we have any retained enablings, we need to match against them.
14912 * Enabling probes requires that cpu_lock be held, and we cannot hold
14913 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14914 * module. (In particular, this happens when loading scheduling
14915 * classes.) So if we have any retained enablings, we need to dispatch
14916 * our task queue to do the match for us.
14917 */
14918 mutex_enter(&dtrace_lock);
14919
14920 if (dtrace_retained == NULL) {
14921 mutex_exit(&dtrace_lock);
14922 return;
14923 }
14924
14925 (void) taskq_dispatch(dtrace_taskq,
14926 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14927
14928 mutex_exit(&dtrace_lock);
14929
14930 /*
14931 * And now, for a little heuristic sleaze: in general, we want to
14932 * match modules as soon as they load. However, we cannot guarantee
14933 * this, because it would lead us to the lock ordering violation
14934 * outlined above. The common case, of course, is that cpu_lock is
14935 * _not_ held -- so we delay here for a clock tick, hoping that that's
14936 * long enough for the task queue to do its work. If it's not, it's
14937 * not a serious problem -- it just means that the module that we
14938 * just loaded may not be immediately instrumentable.
14939 */
14940 xdelay(1);
14941 }
14942
14943 static void
14944 dtrace_module_unloaded(modctl_t *ctl)
14945 {
14946 dtrace_probe_t template, *probe, *first, *next;
14947 dtrace_provider_t *prov;
14948
14949 template.dtpr_mod = ctl->mod_modname;
14950
14951 mutex_enter(&dtrace_provider_lock);
14952 mutex_enter(&mod_lock);
14953 mutex_enter(&dtrace_lock);
14954
14955 if (dtrace_bymod == NULL) {
14956 /*
14957 * The DTrace module is loaded (obviously) but not attached;
14958 * we don't have any work to do.
14959 */
14960 mutex_exit(&dtrace_provider_lock);
14961 mutex_exit(&mod_lock);
14962 mutex_exit(&dtrace_lock);
14963 return;
14964 }
14965
14966 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
14967 probe != NULL; probe = probe->dtpr_nextmod) {
14968 if (probe->dtpr_ecb != NULL) {
14969 mutex_exit(&dtrace_provider_lock);
14970 mutex_exit(&mod_lock);
14971 mutex_exit(&dtrace_lock);
14972
14973 /*
14974 * This shouldn't _actually_ be possible -- we're
14975 * unloading a module that has an enabled probe in it.
14976 * (It's normally up to the provider to make sure that
14977 * this can't happen.) However, because dtps_enable()
14978 * doesn't have a failure mode, there can be an
14979 * enable/unload race. Upshot: we don't want to
14980 * assert, but we're not going to disable the
14981 * probe, either.
14982 */
14983 if (dtrace_err_verbose) {
14984 cmn_err(CE_WARN, "unloaded module '%s' had "
14985 "enabled probes", ctl->mod_modname);
14986 }
14987
14988 return;
14989 }
14990 }
14991
14992 probe = first;
14993
14994 for (first = NULL; probe != NULL; probe = next) {
14995 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
14996
14997 dtrace_probes[probe->dtpr_id - 1] = NULL;
14998
14999 next = probe->dtpr_nextmod;
15000 dtrace_hash_remove(dtrace_bymod, probe);
15001 dtrace_hash_remove(dtrace_byfunc, probe);
15002 dtrace_hash_remove(dtrace_byname, probe);
15003
15004 if (first == NULL) {
15005 first = probe;
15006 probe->dtpr_nextmod = NULL;
15007 } else {
15008 probe->dtpr_nextmod = first;
15009 first = probe;
15010 }
15011 }
15012
15013 /*
15014 * We've removed all of the module's probes from the hash chains and
15015 * from the probe array. Now issue a dtrace_sync() to be sure that
15016 * everyone has cleared out from any probe array processing.
15017 */
15018 dtrace_sync();
15019
15020 for (probe = first; probe != NULL; probe = first) {
15021 first = probe->dtpr_nextmod;
15022 prov = probe->dtpr_provider;
15023 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
15024 probe->dtpr_arg);
15025 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
15026 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
15027 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
15028 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
15029 kmem_free(probe, sizeof (dtrace_probe_t));
15030 }
15031
15032 mutex_exit(&dtrace_lock);
15033 mutex_exit(&mod_lock);
15034 mutex_exit(&dtrace_provider_lock);
15035 }
15036
15037 static void
15038 dtrace_suspend(void)
15039 {
15040 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
15041 }
15042
15043 static void
15044 dtrace_resume(void)
15045 {
15046 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
15047 }
15048 #endif
15049
15050 static int
15051 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
15052 {
15053 ASSERT(MUTEX_HELD(&cpu_lock));
15054 mutex_enter(&dtrace_lock);
15055
15056 switch (what) {
15057 case CPU_CONFIG: {
15058 dtrace_state_t *state;
15059 dtrace_optval_t *opt, rs, c;
15060
15061 /*
15062 * For now, we only allocate a new buffer for anonymous state.
15063 */
15064 if ((state = dtrace_anon.dta_state) == NULL)
15065 break;
15066
15067 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
15068 break;
15069
15070 opt = state->dts_options;
15071 c = opt[DTRACEOPT_CPU];
15072
15073 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
15074 break;
15075
15076 /*
15077 * Regardless of what the actual policy is, we're going to
15078 * temporarily set our resize policy to be manual. We're
15079 * also going to temporarily set our CPU option to denote
15080 * the newly configured CPU.
15081 */
15082 rs = opt[DTRACEOPT_BUFRESIZE];
15083 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
15084 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
15085
15086 (void) dtrace_state_buffers(state);
15087
15088 opt[DTRACEOPT_BUFRESIZE] = rs;
15089 opt[DTRACEOPT_CPU] = c;
15090
15091 break;
15092 }
15093
15094 case CPU_UNCONFIG:
15095 /*
15096 * We don't free the buffer in the CPU_UNCONFIG case. (The
15097 * buffer will be freed when the consumer exits.)
15098 */
15099 break;
15100
15101 default:
15102 break;
15103 }
15104
15105 mutex_exit(&dtrace_lock);
15106 return (0);
15107 }
15108
15109 #if defined(sun)
15110 static void
15111 dtrace_cpu_setup_initial(processorid_t cpu)
15112 {
15113 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15114 }
15115 #endif
15116
15117 static void
15118 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15119 {
15120 if (dtrace_toxranges >= dtrace_toxranges_max) {
15121 int osize, nsize;
15122 dtrace_toxrange_t *range;
15123
15124 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15125
15126 if (osize == 0) {
15127 ASSERT(dtrace_toxrange == NULL);
15128 ASSERT(dtrace_toxranges_max == 0);
15129 dtrace_toxranges_max = 1;
15130 } else {
15131 dtrace_toxranges_max <<= 1;
15132 }
15133
15134 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15135 range = kmem_zalloc(nsize, KM_SLEEP);
15136
15137 if (dtrace_toxrange != NULL) {
15138 ASSERT(osize != 0);
15139 bcopy(dtrace_toxrange, range, osize);
15140 kmem_free(dtrace_toxrange, osize);
15141 }
15142
15143 dtrace_toxrange = range;
15144 }
15145
15146 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
15147 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
15148
15149 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15150 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15151 dtrace_toxranges++;
15152 }
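
/*
 * (Toxic ranges are spans of address space that probe context must never
 * load from; dtrace_attach() below seeds the table by calling
 * dtrace_toxic_ranges(dtrace_toxrange_add).  The table doubles on demand,
 * growing 1, 2, 4, ... entries as ranges are added.)
 */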
15153
15154 /*
15155 * DTrace Driver Cookbook Functions
15156 */
15157 #if defined(sun)
15158 /*ARGSUSED*/
15159 static int
15160 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15161 {
15162 dtrace_provider_id_t id;
15163 dtrace_state_t *state = NULL;
15164 dtrace_enabling_t *enab;
15165
15166 mutex_enter(&cpu_lock);
15167 mutex_enter(&dtrace_provider_lock);
15168 mutex_enter(&dtrace_lock);
15169
15170 if (ddi_soft_state_init(&dtrace_softstate,
15171 sizeof (dtrace_state_t), 0) != 0) {
15172 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15173 mutex_exit(&cpu_lock);
15174 mutex_exit(&dtrace_provider_lock);
15175 mutex_exit(&dtrace_lock);
15176 return (DDI_FAILURE);
15177 }
15178
15179 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15180 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15181 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15182 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15183 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15184 ddi_remove_minor_node(devi, NULL);
15185 ddi_soft_state_fini(&dtrace_softstate);
15186 mutex_exit(&cpu_lock);
15187 mutex_exit(&dtrace_provider_lock);
15188 mutex_exit(&dtrace_lock);
15189 return (DDI_FAILURE);
15190 }
15191
15192 ddi_report_dev(devi);
15193 dtrace_devi = devi;
15194
15195 dtrace_modload = dtrace_module_loaded;
15196 dtrace_modunload = dtrace_module_unloaded;
15197 dtrace_cpu_init = dtrace_cpu_setup_initial;
15198 dtrace_helpers_cleanup = dtrace_helpers_destroy;
15199 dtrace_helpers_fork = dtrace_helpers_duplicate;
15200 dtrace_cpustart_init = dtrace_suspend;
15201 dtrace_cpustart_fini = dtrace_resume;
15202 dtrace_debugger_init = dtrace_suspend;
15203 dtrace_debugger_fini = dtrace_resume;
15204
15205 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15206
15207 ASSERT(MUTEX_HELD(&cpu_lock));
15208
15209 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15210 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15211 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15212 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15213 VM_SLEEP | VMC_IDENTIFIER);
15214 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15215 1, INT_MAX, 0);
15216
15217 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15218 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15219 NULL, NULL, NULL, NULL, NULL, 0);
15220
15221 ASSERT(MUTEX_HELD(&cpu_lock));
15222 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15223 offsetof(dtrace_probe_t, dtpr_nextmod),
15224 offsetof(dtrace_probe_t, dtpr_prevmod));
15225
15226 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15227 offsetof(dtrace_probe_t, dtpr_nextfunc),
15228 offsetof(dtrace_probe_t, dtpr_prevfunc));
15229
15230 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15231 offsetof(dtrace_probe_t, dtpr_nextname),
15232 offsetof(dtrace_probe_t, dtpr_prevname));
15233
15234 if (dtrace_retain_max < 1) {
15235 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15236 "setting to 1", dtrace_retain_max);
15237 dtrace_retain_max = 1;
15238 }
15239
15240 /*
15241 * Now discover our toxic ranges.
15242 */
15243 dtrace_toxic_ranges(dtrace_toxrange_add);
15244
15245 /*
15246 * Before we register ourselves as a provider to our own framework,
15247 * we would like to assert that dtrace_provider is NULL -- but that's
15248 * not true if we were loaded as a dependency of a DTrace provider.
15249 * Once we've registered, we can assert that dtrace_provider is our
15250 * pseudo provider.
15251 */
15252 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15253 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15254
15255 ASSERT(dtrace_provider != NULL);
15256 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15257
15258 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15259 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15260 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15261 dtrace_provider, NULL, NULL, "END", 0, NULL);
15262 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15263 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15264
15265 dtrace_anon_property();
15266 mutex_exit(&cpu_lock);
15267
15268 /*
15269 * If DTrace helper tracing is enabled, we need to allocate the
15270 * trace buffer and initialize the values.
15271 */
15272 if (dtrace_helptrace_enabled) {
15273 ASSERT(dtrace_helptrace_buffer == NULL);
15274 dtrace_helptrace_buffer =
15275 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15276 dtrace_helptrace_next = 0;
15277 }
15278
15279 /*
15280 * If there are already providers, we must ask them to provide their
15281 * probes, and then match any anonymous enabling against them. Note
	 * that there should be no other retained enablings at this time; the
	 * only retained enabling should be the anonymous one.
15285 */
15286 if (dtrace_anon.dta_enabling != NULL) {
15287 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15288
15289 dtrace_enabling_provide(NULL);
15290 state = dtrace_anon.dta_state;
15291
15292 /*
15293 * We couldn't hold cpu_lock across the above call to
15294 * dtrace_enabling_provide(), but we must hold it to actually
15295 * enable the probes. We have to drop all of our locks, pick
15296 * up cpu_lock, and regain our locks before matching the
15297 * retained anonymous enabling.
15298 */
15299 mutex_exit(&dtrace_lock);
15300 mutex_exit(&dtrace_provider_lock);
15301
15302 mutex_enter(&cpu_lock);
15303 mutex_enter(&dtrace_provider_lock);
15304 mutex_enter(&dtrace_lock);
15305
15306 if ((enab = dtrace_anon.dta_enabling) != NULL)
15307 (void) dtrace_enabling_match(enab, NULL);
15308
15309 mutex_exit(&cpu_lock);
15310 }
15311
15312 mutex_exit(&dtrace_lock);
15313 mutex_exit(&dtrace_provider_lock);
15314
15315 if (state != NULL) {
15316 /*
15317 * If we created any anonymous state, set it going now.
15318 */
15319 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
15320 }
15321
15322 return (DDI_SUCCESS);
15323 }
15324 #endif
15325
15326 #if !defined(sun)
15327 #if __FreeBSD_version >= 800039
15328 static void
15329 dtrace_dtr(void *data __unused)
15330 {
15331 }
15332 #endif
15333 #endif
15334
15335 #if !defined(sun)
15336 static dev_type_open(dtrace_open);
15337
15338 /* Pseudo Device Entry points */
15339 /* Just opens, clones to the fileops below */
15340 const struct cdevsw dtrace_cdevsw = {
15341 .d_open = dtrace_open,
15342 .d_close = noclose,
15343 .d_read = noread,
15344 .d_write = nowrite,
15345 .d_ioctl = noioctl,
15346 .d_stop = nostop,
15347 .d_tty = notty,
15348 .d_poll = nopoll,
15349 .d_mmap = nommap,
15350 .d_kqfilter = nokqfilter,
15351 .d_discard = nodiscard,
15352 .d_flag = D_OTHER | D_MPSAFE
15353 };
15354
15355 static int dtrace_ioctl(struct file *fp, u_long cmd, void *data);
15356 static int dtrace_close(struct file *fp);
15357
15358 static const struct fileops dtrace_fileops = {
15359 .fo_read = fbadop_read,
15360 .fo_write = fbadop_write,
15361 .fo_ioctl = dtrace_ioctl,
15362 .fo_fcntl = fnullop_fcntl,
15363 .fo_poll = fnullop_poll,
15364 .fo_stat = fbadop_stat,
15365 .fo_close = dtrace_close,
15366 .fo_kqfilter = fnullop_kqfilter,
15367 };
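
/*
 * (The open path on this port: dtrace_open() below allocates a file with
 * fd_allocfile(), creates the per-consumer state, and hands the descriptor
 * to fd_clone() with dtrace_fileops -- each consumer thus gets private
 * state hung off fp->f_data, and the cdevsw needs only d_open.)
 */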
15368 #endif
15369
15370 /*ARGSUSED*/
15371 static int
15372 #if defined(sun)
15373 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
15374 #else
15375 dtrace_open(dev_t dev, int flags, int mode, struct lwp *l)
15376 #endif
15377 {
15378 dtrace_state_t *state;
15379 uint32_t priv;
15380 uid_t uid;
15381 zoneid_t zoneid;
15382
15383 #if defined(sun)
15384 if (getminor(*devp) == DTRACEMNRN_HELPER)
15385 return (0);
15386
15387 /*
15388 * If this wasn't an open with the "helper" minor, then it must be
15389 * the "dtrace" minor.
15390 */
15391 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);
15392 #else
15393 cred_t *cred_p = NULL;
15394 struct file *fp;
15395 int fd;
15396 int res;
15397
15398 if ((res = fd_allocfile(&fp, &fd)) != 0)
15399 return res;
15400 #if 0
15401 #if __FreeBSD_version < 800039
15402 /*
15403 * The first minor device is the one that is cloned so there is
15404 * nothing more to do here.
15405 */
15406 if (dev2unit(dev) == 0)
15407 return 0;
15408
15409 /*
15410 * Devices are cloned, so if the DTrace state has already
15411 * been allocated, that means this device belongs to a
15412 * different client. Each client should open '/dev/dtrace'
15413 * to get a cloned device.
15414 */
15415 if (dev->si_drv1 != NULL)
15416 return (EBUSY);
15417 #endif
15418
15419 cred_p = dev->si_cred;
15420 #endif
15421 cred_p = l->l_cred;
15422 #endif
15423
15424 /*
15425 * If no DTRACE_PRIV_* bits are set in the credential, then the
15426 * caller lacks sufficient permission to do anything with DTrace.
15427 */
15428 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15429 if (priv == DTRACE_PRIV_NONE) {
15430 return (EACCES);
15431 }
15432
15433 /*
15434 * Ask all providers to provide all their probes.
15435 */
15436 mutex_enter(&dtrace_provider_lock);
15437 dtrace_probe_provide(NULL, NULL);
15438 mutex_exit(&dtrace_provider_lock);
15439
15440 mutex_enter(&cpu_lock);
15441 mutex_enter(&dtrace_lock);
15442 dtrace_opens++;
15443 dtrace_membar_producer();
15444
15445 #if defined(sun)
15446 /*
15447 * If the kernel debugger is active (that is, if the kernel debugger
15448 * modified text in some way), we won't allow the open.
15449 */
15450 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15451 dtrace_opens--;
15452 mutex_exit(&cpu_lock);
15453 mutex_exit(&dtrace_lock);
15454 return (EBUSY);
15455 }
15456
15457 state = dtrace_state_create(devp, cred_p);
15458 #else
15459 state = dtrace_state_create(dev, cred_p);
15460 #endif
15461
15462 mutex_exit(&cpu_lock);
15463
15464 if (state == NULL) {
15465 #if defined(sun)
15466 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15467 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15468 #else
15469 --dtrace_opens;
15470 #endif
15471 mutex_exit(&dtrace_lock);
15472 return (EAGAIN);
15473 }
15474
15475 mutex_exit(&dtrace_lock);
15476
15477 #if defined(sun)
15478 return (0);
15479 #else
15480 return fd_clone(fp, fd, flags, &dtrace_fileops, state);
15481 #endif
15482 }
15483
15484 /*ARGSUSED*/
15485 static int
15486 #if defined(sun)
15487 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15488 #else
15489 dtrace_close(struct file *fp)
15490 #endif
15491 {
15492 #if defined(sun)
15493 minor_t minor = getminor(dev);
15494 dtrace_state_t *state;
15495
15496 if (minor == DTRACEMNRN_HELPER)
15497 return (0);
15498
15499 state = ddi_get_soft_state(dtrace_softstate, minor);
15500 #else
15501 dtrace_state_t *state = (dtrace_state_t *)fp->f_data;
15502 #endif
15503
15504 mutex_enter(&cpu_lock);
15505 mutex_enter(&dtrace_lock);
15506
15507 if (state != NULL) {
15508 if (state->dts_anon) {
15509 /*
15510 * There is anonymous state. Destroy that first.
15511 */
15512 ASSERT(dtrace_anon.dta_state == NULL);
15513 dtrace_state_destroy(state->dts_anon);
15514 }
15515
15516 dtrace_state_destroy(state);
15517
15518 #if !defined(sun)
15519 fp->f_data = NULL;
15520 #endif
15521 }
15522
15523 ASSERT(dtrace_opens > 0);
15524 #if defined(sun)
15525 /*
15526 * Only relinquish control of the kernel debugger interface when there
15527 * are no consumers and no anonymous enablings.
15528 */
15529 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15530 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15531 #else
15532 --dtrace_opens;
15533 #endif
15534
15535 mutex_exit(&dtrace_lock);
15536 mutex_exit(&cpu_lock);
15537
15538 return (0);
15539 }
15540
15541 #if defined(sun)
15542 /*ARGSUSED*/
15543 static int
15544 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15545 {
15546 int rval;
15547 dof_helper_t help, *dhp = NULL;
15548
15549 switch (cmd) {
15550 case DTRACEHIOC_ADDDOF:
15551 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15552 dtrace_dof_error(NULL, "failed to copyin DOF helper");
15553 return (EFAULT);
15554 }
15555
15556 dhp = &help;
15557 arg = (intptr_t)help.dofhp_dof;
15558 /*FALLTHROUGH*/
15559
15560 case DTRACEHIOC_ADD: {
15561 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
15562
15563 if (dof == NULL)
15564 return (rval);
15565
15566 mutex_enter(&dtrace_lock);
15567
15568 /*
15569 * dtrace_helper_slurp() takes responsibility for the dof --
15570 * it may free it now or it may save it and free it later.
15571 */
15572 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
15573 *rv = rval;
15574 rval = 0;
15575 } else {
15576 rval = EINVAL;
15577 }
15578
15579 mutex_exit(&dtrace_lock);
15580 return (rval);
15581 }
15582
15583 case DTRACEHIOC_REMOVE: {
15584 mutex_enter(&dtrace_lock);
15585 rval = dtrace_helper_destroygen(arg);
15586 mutex_exit(&dtrace_lock);
15587
15588 return (rval);
15589 }
15590
15591 default:
15592 break;
15593 }
15594
15595 return (ENOTTY);
15596 }
15597
15598 /*ARGSUSED*/
15599 static int
15600 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
15601 {
15602 minor_t minor = getminor(dev);
15603 dtrace_state_t *state;
15604 int rval;
15605
15606 if (minor == DTRACEMNRN_HELPER)
15607 return (dtrace_ioctl_helper(cmd, arg, rv));
15608
15609 state = ddi_get_soft_state(dtrace_softstate, minor);
15610
15611 if (state->dts_anon) {
15612 ASSERT(dtrace_anon.dta_state == NULL);
15613 state = state->dts_anon;
15614 }
15615
15616 switch (cmd) {
15617 case DTRACEIOC_PROVIDER: {
15618 dtrace_providerdesc_t pvd;
15619 dtrace_provider_t *pvp;
15620
15621 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
15622 return (EFAULT);
15623
15624 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
15625 mutex_enter(&dtrace_provider_lock);
15626
15627 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
15628 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
15629 break;
15630 }
15631
15632 mutex_exit(&dtrace_provider_lock);
15633
15634 if (pvp == NULL)
15635 return (ESRCH);
15636
15637 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
15638 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
15639
15640 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
15641 return (EFAULT);
15642
15643 return (0);
15644 }
15645
15646 case DTRACEIOC_EPROBE: {
15647 dtrace_eprobedesc_t epdesc;
15648 dtrace_ecb_t *ecb;
15649 dtrace_action_t *act;
15650 void *buf;
15651 size_t size;
15652 uintptr_t dest;
15653 int nrecs;
15654
15655 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
15656 return (EFAULT);
15657
15658 mutex_enter(&dtrace_lock);
15659
15660 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
15661 mutex_exit(&dtrace_lock);
15662 return (EINVAL);
15663 }
15664
15665 if (ecb->dte_probe == NULL) {
15666 mutex_exit(&dtrace_lock);
15667 return (EINVAL);
15668 }
15669
15670 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
15671 epdesc.dtepd_uarg = ecb->dte_uarg;
15672 epdesc.dtepd_size = ecb->dte_size;
15673
15674 nrecs = epdesc.dtepd_nrecs;
15675 epdesc.dtepd_nrecs = 0;
15676 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15677 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15678 continue;
15679
15680 epdesc.dtepd_nrecs++;
15681 }
15682
		/*
		 * Now that we have the size, we need to allocate a temporary
		 * buffer in which to store the complete description. We need
		 * the temporary buffer to be able to drop dtrace_lock across
		 * the copyout(), below.
		 */
		size = sizeof (dtrace_eprobedesc_t) +
		    (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));

		buf = kmem_alloc(size, KM_SLEEP);
		dest = (uintptr_t)buf;

		bcopy(&epdesc, (void *)dest, sizeof (epdesc));
		dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);

		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
				continue;

			if (nrecs-- == 0)
				break;

			bcopy(&act->dta_rec, (void *)dest,
			    sizeof (dtrace_recdesc_t));
			dest += sizeof (dtrace_recdesc_t);
		}

		mutex_exit(&dtrace_lock);

		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
			kmem_free(buf, size);
			return (EFAULT);
		}

		kmem_free(buf, size);
		return (0);
	}

	case DTRACEIOC_AGGDESC: {
		dtrace_aggdesc_t aggdesc;
		dtrace_action_t *act;
		dtrace_aggregation_t *agg;
		int nrecs;
		uint32_t offs;
		dtrace_recdesc_t *lrec;
		void *buf;
		size_t size;
		uintptr_t dest;

		if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;

		nrecs = aggdesc.dtagd_nrecs;
		aggdesc.dtagd_nrecs = 0;

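		/*
		 * The aggregation's data spans from its base offset through
		 * the end of its last record; record offsets are rebased
		 * against dtag_base when copied out below, so that user
		 * level sees offsets relative to the start of the
		 * aggregation data.
		 */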
		offs = agg->dtag_base;
		lrec = &agg->dtag_action.dta_rec;
		aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;

		for (act = agg->dtag_first; ; act = act->dta_next) {
			ASSERT(act->dta_intuple ||
			    DTRACEACT_ISAGG(act->dta_kind));

			/*
			 * If this action has a record size of zero, it
			 * denotes an argument to the aggregating action.
			 * Because the presence of this record doesn't (or
			 * shouldn't) affect the way the data is interpreted,
			 * we don't copy it out to save user-level the
			 * confusion of dealing with a zero-length record.
			 */
			if (act->dta_rec.dtrd_size == 0) {
				ASSERT(agg->dtag_hasarg);
				continue;
			}

			aggdesc.dtagd_nrecs++;

			if (act == &agg->dtag_action)
				break;
		}

		/*
		 * Now that we have the size, we need to allocate a temporary
		 * buffer in which to store the complete description. We need
		 * the temporary buffer to be able to drop dtrace_lock across
		 * the copyout(), below.
		 */
		size = sizeof (dtrace_aggdesc_t) +
		    (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));

		buf = kmem_alloc(size, KM_SLEEP);
		dest = (uintptr_t)buf;

		bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
		dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);

		for (act = agg->dtag_first; ; act = act->dta_next) {
			dtrace_recdesc_t rec = act->dta_rec;

			/*
			 * See the comment in the above loop for why we pass
			 * over zero-length records.
			 */
			if (rec.dtrd_size == 0) {
				ASSERT(agg->dtag_hasarg);
				continue;
			}

			if (nrecs-- == 0)
				break;

			rec.dtrd_offset -= offs;
			bcopy(&rec, (void *)dest, sizeof (rec));
			dest += sizeof (dtrace_recdesc_t);

			if (act == &agg->dtag_action)
				break;
		}

		mutex_exit(&dtrace_lock);

		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
			kmem_free(buf, size);
			return (EFAULT);
		}

		kmem_free(buf, size);
		return (0);
	}

	case DTRACEIOC_ENABLE: {
		dof_hdr_t *dof;
		dtrace_enabling_t *enab = NULL;
		dtrace_vstate_t *vstate;
		int err = 0;

		*rv = 0;

		/*
		 * If a NULL argument has been passed, we take this as our
		 * cue to reevaluate our enablings.
		 */
		if (arg == 0) {
			dtrace_enabling_matchall();

			return (0);
		}

		if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
			return (rval);

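		/*
		 * Note the lock ordering: cpu_lock is acquired before
		 * dtrace_lock, the ordering observed throughout the
		 * framework; enabling matching can call into providers'
		 * enable entry points, which may depend on the CPU
		 * configuration remaining stable.
		 */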
		mutex_enter(&cpu_lock);
		mutex_enter(&dtrace_lock);
		vstate = &state->dts_vstate;

		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EBUSY);
		}

		if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EINVAL);
		}

		if ((rval = dtrace_dof_options(dof, state)) != 0) {
			dtrace_enabling_destroy(enab);
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (rval);
		}

		if ((err = dtrace_enabling_match(enab, rv)) == 0) {
			err = dtrace_enabling_retain(enab);
		} else {
			dtrace_enabling_destroy(enab);
		}

		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_lock);
		dtrace_dof_destroy(dof);

		return (err);
	}

	case DTRACEIOC_REPLICATE: {
		dtrace_repldesc_t desc;
		dtrace_probedesc_t *match = &desc.dtrpd_match;
		dtrace_probedesc_t *create = &desc.dtrpd_create;
		int err;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		mutex_enter(&dtrace_lock);
		err = dtrace_enabling_replicate(state, match, create);
		mutex_exit(&dtrace_lock);

		return (err);
	}

	case DTRACEIOC_PROBEMATCH:
	case DTRACEIOC_PROBES: {
		dtrace_probe_t *probe = NULL;
		dtrace_probedesc_t desc;
		dtrace_probekey_t pkey;
		dtrace_id_t i;
		int m = 0;
		uint32_t priv;
		uid_t uid;
		zoneid_t zoneid;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		/*
		 * Before we attempt to match this probe, we want to give
		 * all providers the opportunity to provide it.
		 */
		if (desc.dtpd_id == DTRACE_IDNONE) {
			mutex_enter(&dtrace_provider_lock);
			dtrace_probe_provide(&desc, NULL);
			mutex_exit(&dtrace_provider_lock);
			desc.dtpd_id++;
		}

		if (cmd == DTRACEIOC_PROBEMATCH) {
			dtrace_probekey(&desc, &pkey);
			pkey.dtpk_id = DTRACE_IDNONE;
		}

		dtrace_cred2priv(cr, &priv, &uid, &zoneid);

		mutex_enter(&dtrace_lock);

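		/*
		 * Probe identifiers are 1-based (DTRACE_IDNONE is zero),
		 * while the dtrace_probes[] array is 0-indexed: the searches
		 * below walk i from dtpd_id through dtrace_nprobes and
		 * examine dtrace_probes[i - 1].
		 */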
		if (cmd == DTRACEIOC_PROBEMATCH) {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    (m = dtrace_match_probe(probe, &pkey,
				    priv, uid, zoneid)) != 0)
					break;
			}

			if (m < 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}

		} else {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    dtrace_match_priv(probe, priv, uid, zoneid))
					break;
			}
		}

		if (probe == NULL) {
			mutex_exit(&dtrace_lock);
			return (ESRCH);
		}

		dtrace_probe_description(probe, &desc);
		mutex_exit(&dtrace_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_PROBEARG: {
		dtrace_argdesc_t desc;
		dtrace_probe_t *probe;
		dtrace_provider_t *prov;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtargd_id == DTRACE_IDNONE)
			return (EINVAL);

		if (desc.dtargd_ndx == DTRACE_ARGNONE)
			return (EINVAL);

		mutex_enter(&dtrace_provider_lock);
		mutex_enter(&mod_lock);
		mutex_enter(&dtrace_lock);

		if (desc.dtargd_id > dtrace_nprobes) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		mutex_exit(&dtrace_lock);

		prov = probe->dtpr_provider;

		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
			/*
			 * There isn't any typed information for this probe.
			 * Set the argument number to DTRACE_ARGNONE.
			 */
			desc.dtargd_ndx = DTRACE_ARGNONE;
		} else {
			desc.dtargd_native[0] = '\0';
			desc.dtargd_xlate[0] = '\0';
			desc.dtargd_mapping = desc.dtargd_ndx;

			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
			    probe->dtpr_id, probe->dtpr_arg, &desc);
		}

		mutex_exit(&mod_lock);
		mutex_exit(&dtrace_provider_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_GO: {
		processorid_t cpuid;

		rval = dtrace_state_go(state, &cpuid);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STOP: {
		processorid_t cpuid;

		mutex_enter(&dtrace_lock);
		rval = dtrace_state_stop(state, &cpuid);
		mutex_exit(&dtrace_lock);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_DOFGET: {
		dof_hdr_t hdr, *dof;
		uint64_t len;

		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);
		dof = dtrace_dof_create(state);
		mutex_exit(&dtrace_lock);

		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
		rval = copyout(dof, (void *)arg, len);
		dtrace_dof_destroy(dof);

		return (rval == 0 ? 0 : EFAULT);
	}

	case DTRACEIOC_AGGSNAP:
	case DTRACEIOC_BUFSNAP: {
		dtrace_bufdesc_t desc;
		caddr_t cached;
		dtrace_buffer_t *buf;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
			return (EINVAL);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_BUFSNAP) {
			buf = &state->dts_buffer[desc.dtbd_cpu];
		} else {
			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
		}

		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
			size_t sz = buf->dtb_offset;

			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
				mutex_exit(&dtrace_lock);
				return (EBUSY);
			}

			/*
			 * If this buffer has already been consumed, we're
			 * going to indicate that there's nothing left here
			 * to consume.
			 */
			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
				mutex_exit(&dtrace_lock);

				desc.dtbd_size = 0;
				desc.dtbd_drops = 0;
				desc.dtbd_errors = 0;
				desc.dtbd_oldest = 0;
				sz = sizeof (desc);

				if (copyout(&desc, (void *)arg, sz) != 0)
					return (EFAULT);

				return (0);
			}

			/*
			 * If this is a ring buffer that has wrapped, we want
			 * to copy the whole thing out.
			 */
			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
				dtrace_buffer_polish(buf);
				sz = buf->dtb_size;
			}

			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}

			desc.dtbd_size = sz;
			desc.dtbd_drops = buf->dtb_drops;
			desc.dtbd_errors = buf->dtb_errors;
			desc.dtbd_oldest = buf->dtb_xamot_offset;

			mutex_exit(&dtrace_lock);

			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
				return (EFAULT);

			buf->dtb_flags |= DTRACEBUF_CONSUMED;

			return (0);
		}

		if (buf->dtb_tomax == NULL) {
			ASSERT(buf->dtb_xamot == NULL);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		cached = buf->dtb_tomax;
		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

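		/*
		 * Cross call to the target CPU to switch the active
		 * (dtb_tomax) and inactive (dtb_xamot) buffers; because the
		 * switch runs on that CPU with interrupts disabled, it is
		 * atomic with respect to probes firing there.
		 */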
		dtrace_xcall(desc.dtbd_cpu,
		    (dtrace_xcall_t)dtrace_buffer_switch, buf);

		state->dts_errors += buf->dtb_xamot_errors;

		/*
		 * If the buffers did not actually switch, then the cross call
		 * did not take place -- presumably because the given CPU is
		 * not in the ready set. If this is the case, we'll return
		 * ENOENT.
		 */
		if (buf->dtb_tomax == cached) {
			ASSERT(buf->dtb_xamot != cached);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		ASSERT(cached == buf->dtb_xamot);

		/*
		 * We have our snapshot; now copy it out.
		 */
		if (copyout(buf->dtb_xamot, desc.dtbd_data,
		    buf->dtb_xamot_offset) != 0) {
			mutex_exit(&dtrace_lock);
			return (EFAULT);
		}

		desc.dtbd_size = buf->dtb_xamot_offset;
		desc.dtbd_drops = buf->dtb_xamot_drops;
		desc.dtbd_errors = buf->dtb_xamot_errors;
		desc.dtbd_oldest = 0;

		mutex_exit(&dtrace_lock);

		/*
		 * Finally, copy out the buffer description.
		 */
		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value. (Briefly: the 64-bit store may
		 * not be atomic, so storing INT64_MAX first -- with a
		 * producer barrier in between -- assures that the deadman
		 * never observes a torn value that appears too old.)
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

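		/*
		 * If the supplied buffer is too small, copy the descriptor
		 * back with dtfd_length set to the required size so that
		 * user level can retry with a larger buffer.
		 */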
		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

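	/*
	 * Locks are acquired in the framework's canonical order: cpu_lock,
	 * then dtrace_provider_lock, then dtrace_lock.
	 */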
	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should not
		 * have been allowed to detach; assert that there are none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks). To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

#if 0
static d_ioctl_t	dtrace_ioctl;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
#if __FreeBSD_version < 800039
static void		dtrace_clone(void *, struct ucred *, char *, int,
			    struct cdev **);
static struct clonedevs	*dtrace_clones;	/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;		/* Event handler tag. */
#else
static struct cdev	*dtrace_dev;
#endif

void	dtrace_invop_init(void);
void	dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};
#endif
void	dtrace_invop_init(void);
void	dtrace_invop_uninit(void);

static void	dtrace_load(void *);
static int	dtrace_unload(void);

#include <dtrace_anon.c>
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
#include <dtrace_isa.c>
#endif

MODULE(MODULE_CLASS_DRIVER, dtrace, "solaris");

#if 0
DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif
#endif

#if !defined(sun)
#undef mutex_init

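/*
 * A minimal periodic-callout facility used on this platform in place of
 * the cyclic subsystem: a worker thread wakes up every 'interval' ticks
 * and invokes fn(state) until it is told to exit. This drives the
 * periodic per-state callbacks (e.g. the cleaner and the deadman) that
 * cyclics would otherwise provide.
 */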
struct dtrace_state_worker {
	kmutex_t lock;
	kcondvar_t cv;
	void (*fn)(dtrace_state_t *);
	dtrace_state_t *state;
	int interval;
	lwp_t *lwp;
	bool exiting;
};

static void
dtrace_state_worker_thread(void *vp)
{
	struct dtrace_state_worker *w = vp;

	mutex_enter(&w->lock);
	while (!w->exiting) {
		int error;

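		/*
		 * cv_timedwait() returns EWOULDBLOCK when the interval
		 * elapses without a wakeup (a wakeup signals exit); that is
		 * our cue to run the callback. The lock is dropped across
		 * fn(), which does not require it.
		 */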
		error = cv_timedwait(&w->cv, &w->lock, w->interval);
		if (error == EWOULDBLOCK) {
			mutex_exit(&w->lock);
			w->fn(w->state);
			mutex_enter(&w->lock);
		}
	}
	mutex_exit(&w->lock);
	kthread_exit(0);
}

struct dtrace_state_worker *
dtrace_state_worker_add(void (*fn)(dtrace_state_t *), dtrace_state_t *state,
    hrtime_t interval)
{
	struct dtrace_state_worker *w;
	int error __diagused;

	w = kmem_alloc(sizeof(*w), KM_SLEEP);
	mutex_init(&w->lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&w->cv, "dtrace");
	/* Convert the interval from nanoseconds to clock ticks. */
	w->interval = ((uintmax_t)hz * interval) / NANOSEC;
	w->fn = fn;
	w->state = state;
	w->exiting = false;
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_MUSTJOIN, NULL,
	    dtrace_state_worker_thread, w, &w->lwp, "dtrace-state-worker");
	KASSERT(error == 0); /* XXX */
	return w;
}

void
dtrace_state_worker_remove(struct dtrace_state_worker *w)
{
	int error __diagused;

	KASSERT(!w->exiting);
	mutex_enter(&w->lock);
	w->exiting = true;
	cv_signal(&w->cv);
	mutex_exit(&w->lock);
	error = kthread_join(w->lwp);
	KASSERT(error == 0);
	cv_destroy(&w->cv);
	mutex_destroy(&w->lock);
	kmem_free(w, sizeof(*w));
}
#endif